repo_name (stringlengths 5-92) | path (stringlengths 4-221) | copies (stringclasses, 19 values) | size (stringlengths 4-6) | content (stringlengths 766-896k) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
IdeaSolutionsOnline/ERP4R | core/objs/sai_pesquisacont.py | 1 | 4546 |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
ERP+
"""
__author__ = ['António Anacleto', 'Jair Medina']
__credits__ = []
__version__ = "1.0"
__maintainer__ = ['António Anacleto', 'Jair Medina']
__status__ = "Development"
__model_name__= 'sai_pesquisacont.SaiPesquisacont'
#import base_models#auth,
from orm import *
from form import *
class SaiPesquisacont (Model, View):
def __init__(self, **kargs):
Model.__init__(self, **kargs)
self.__name__ = 'sai_pesquisacont'
self.__title__= 'Por Contribuinte'
self.__model_name__ = __model_name__
self.__list_edit_mode__ = 'edit'
self.__db_mode__ = 'None'
self.__workflow__ = (
'estado', {'Rascunho':['Confirmar'], 'Confirmado':['Imprimir','Exportar']}
)
self.__workflow_auth__ = {
'Confirmar':['Gestor'],
'Rascunho':['Gestor'],
'Exportar':['All'],
'Imprimir':['All'],
}
self.__no_edit__ = [
('estado', ['Confirmado','Impresso'])
]
self.__auth__ = {
'read':['All'],
'write':['Técnico DNRE','Gestor DNRE' ],
'create':['Administrador'],
'delete':['Administrador'],
'full_access':['Administrador']
}
self.nu_nif = integer_field(view_order=1, name='NIF', size=80)
self.cliente = boolean_field(view_order = 2, name = 'Cliente?', default = False)
self.fornecedor = boolean_field(view_order = 3, name = 'Fornecedor?', default = True)
self.estado = info_field(view_order=4, name ='Estado', default='Confirmado', hidden=True, nolabel=True,)
def prepare_data(self):
nu_nif = bottle.request.forms.get('nu_nif')
descricao = 'Alertas e Infrações de um Contribuinte'
cliente = bottle.request.forms.get('cliente')
record = {}
#print(nu_nif, cliente)
if cliente == 'False' :
sql="""select nu_nif, nm_contribuinte, dt_periodo, nu_nif_anexo, nm_contribuinte_anexo,nu_factura,dt_factura,vl_factura,vl_liquidado,validar_iva, nif_valido,declarado,info_valido from anexo_cli_out_13 where nu_nif= '{nif}' and validar_iva =1 or nu_nif='{nif}' and nif_valido = false or nu_nif='{nif}' and declarado = false or nu_nif='{nif}' and info_valido = false ORDER BY dt_periodo DESC""".format(nif=nu_nif)
data = run_sql(sql)
for i in data:
record['contribuinte']= i['nm_contribuinte']
break
record['sql2']=sql
record['nu_nif'] = nu_nif
record['lines'] = data
record['nome'] ='Cliente'
record['descricao'] = descricao
return record
else:
sql="""select nu_nif, nm_contribuinte, dt_periodo, nu_nif_anexo, nm_contribuinte_anexo,nu_factura,dt_factura,vl_factura,vl_dedutivel,validar_iva, nif_valido,declarado,info_valido from anexo_for_out_13 where nu_nif= '{nif}' and validar_iva =1 or nu_nif='{nif}' and nif_valido = false or nu_nif='{nif}' and declarado = false or nu_nif='{nif}' and info_valido = false ORDER BY dt_periodo DESC""".format(nif=nu_nif)
data = run_sql(sql)
for i in data:
record['contribuinte']= i['nm_contribuinte']
break
record['sql2']=sql
record['nu_nif'] = nu_nif
record['lines'] = data
record['nome'] ='Fornecedor'
record['descricao'] = descricao
return record
def Imprimir(self, key, window_id):
record = self.prepare_data()
if record['nome'] == 'Fornecedor':
template = 'sai_contribuintefor'
return Report(record=record, report_template=template).show()
else:
template = 'sai_contribuintecli'
return Report(record=record, report_template=template).show()
def Exportar(self, key, window_id):
x=self.prepare_data()
#record = get_records_to_print(key=key, model=self)
#print (record, key)
sql = x['sql2'] #record['sql']
print(sql, 'noooooooooo Exportar')
# variaveis = record['linha_sql_report']
# if variaveis:
# variaveis_dict = {}
# for variavel in variaveis:
# variaveis_dict[variavel['variavel']] = variavel['valor']
# sql = sql.format(**variaveis_dict)
result = run_sql(sql)
return data_to_csv(result, self, 'Gravar')
#253298121
| mit | -1,430,141,858,512,203,300 | 35.328 | 426 | 0.558027 | false | 3.1822 | false | false | false |
linkslice/ZenPacks.community.EMCIsilon | ZenPacks/community/EMCIsilon/modeler/plugins/community/snmp/EMCIsilonDiskPerfs.py | 1 | 1526 |
from Products.DataCollector.plugins.CollectorPlugin import (
SnmpPlugin, GetTableMap,
)
class EMCIsilonDiskPerfs(SnmpPlugin):
relname = 'emcisilon_diskperfs'
modname = 'ZenPacks.community.EMCIsilon.EMCIsilonDiskPerf'
snmpGetTableMaps = (
GetTableMap(
'diskPerfTable', '.1.3.6.1.4.1.12124.2.2.52.1', {
'.1': 'diskPerfBay',
'.2': 'diskPerfDeviceName',
'.3': 'diskperfOpsPerSecond',
'.4': 'diskperfInBitsPerSecond',
'.5': 'diskperfOutBitsPerSecond',
}
),
)
def process(self, device, results, log):
emcisilon_diskperfs = results[1].get('diskPerfTable', {})
rm = self.relMap()
for snmpindex, row in emcisilon_diskperfs.items():
name = row.get('diskPerfDeviceName')
if not name:
log.warn('Skipping empty disk perf stats')
continue
log.debug('found disk perf stats: %s at %s', name, snmpindex.strip('.'))
rm.append(self.objectMap({
'id': self.prepId(name),
'title': name,
'snmpindex': snmpindex.strip('.'),
'disk_perf_ops_per_second': row.get('diskperfOpsPerSecond'),
'disk_perf_in_bits_per_second': row.get('diskperfInBitsPerSecond'),
'disk_perf_out_bits_per_second': row.get('diskperfOutBitsPerSecond'),
}))
log.debug(rm)
return rm
| mit | 513,000,661,527,704,640 | 34.488372 | 85 | 0.542595 | false | 3.740196 | false | false | false |
ellak-monades-aristeias/enhydris | enhydris/hcore/migrations/0006_offset_help.py | 1 | 2090 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('hcore', '0005_remove_is_active'),
]
operations = [
migrations.AlterField(
model_name='timeseries',
name='timestamp_offset_minutes',
field=models.IntegerField(help_text='If unsure, set this to zero. It indicates the difference of what is shown from what is meant. For example, if for an hourly time series it is -5, then 2015-10-14 11:00 means the interval from 2015-10-14 09:55 to 2015-10-14 10:55. -1440 is common for daily time series.', null=True, blank=True),
),
migrations.AlterField(
model_name='timeseries',
name='timestamp_offset_months',
field=models.SmallIntegerField(help_text='If unsure, set this to 1 for monthly, 12 for annual, and zero otherwise. For a monthly time series, an offset of -475 minutes and 1 month means that 2003-11-01 00:00 (normally shown as 2003-11) denotes the interval 2003-10-31 18:05 to 2003-11-30 18:05.', null=True, blank=True),
),
migrations.AlterField(
model_name='timeseries',
name='timestamp_rounding_minutes',
field=models.PositiveIntegerField(help_text='For an hourly time series whose timestamps end in :00, set this to zero; if they end in :12, set it to 12. For a ten-minute time series with timestamps ending in :12, :22, :32, etc., set it to 2. For daily ending at 08:00, set it to 480. Leave empty if timestamps are irregular.', null=True, blank=True),
),
migrations.AlterField(
model_name='timeseries',
name='timestamp_rounding_months',
field=models.PositiveSmallIntegerField(help_text='Set this to zero, except for annual time series, indicating the difference from January; for example, set it to 9 if the timestamps use a hydrological year starting in October. Leave empty if timestamps are irregular.', null=True, blank=True),
),
]
| agpl-3.0 | -1,065,025,794,875,866,500 | 60.470588 | 362 | 0.673206 | false | 4.058252 | false | false | false |
crackcell/www-entity-mining | software/game/17173.com/crawler.py | 1 | 1873 |
#!/usr/bin/env python
# -*- encoding: utf-8; indent-tabs-mode: nil -*-
"""
crawler
~~~~~~~
Crawl the list of games from 17173.com.
:copyright: (c) 2015 Menglong TAN.
"""
import os
import sys
import re
import urllib2
import time
import BeautifulSoup
import logging
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
fmt = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(fmt)
logger.addHandler(ch)
class Star(object):
def __init__(self):
self.name = ""
self.gender = ""
self.nation = ""
self.birth = ""
self.horoscope = ""
self.height = ""
def __repr__(self):
return "%s\t%s\t%s\t%s\t%s\t%s" % (self.name, self.gender, self.nation,
self.birth, self.horoscope,
self.height)
def extract_list(url):
headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1) Gecko/20090624 Firefox/3.5"}
req = urllib2.Request(url, headers=headers)
resp = None
try:
resp = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print "Error Code:", e.code
return
except urllib2.URLError, e:
print "Error Reason:", e.reason
return
soup = BeautifulSoup.BeautifulSoup(resp.read())
games = []
cnt = 0
for html in soup.findAll("a", attrs={"class":"link"}):
games.append(str(html.contents[0]))
cnt += 1
logger.info("progress: %d", cnt)
return games
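# Illustrative call (comment added here, not in the original file), using the
# same URL the __main__ block below passes in:
#
#   games = extract_list("http://www.17173.com/zq/all.shtml")
#   # -> list of game-name strings scraped from <a class="link"> anchors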
if __name__ == "__main__":
list_url = "http://www.17173.com/zq/all.shtml"
f = open("games.dat", "w+")
logger.info("progress")
games = extract_list(list_url)
for game in games:
f.write(game + "\n")
f.flush()
f.close()
| bsd-3-clause | -3,796,389,506,530,348,000 | 23.973333 | 116 | 0.566471 | false | 3.320922 | false | false | false |
JungeAlexander/cocoscore | ci/bootstrap.py | 1 | 2124 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
from os.path import abspath
from os.path import dirname
from os.path import exists
from os.path import join
if __name__ == "__main__":
base_path = dirname(dirname(abspath(__file__)))
print("Project path: {0}".format(base_path))
env_path = join(base_path, ".tox", "bootstrap")
if sys.platform == "win32":
bin_path = join(env_path, "Scripts")
else:
bin_path = join(env_path, "bin")
if not exists(env_path):
import subprocess
print("Making bootstrap env in: {0} ...".format(env_path))
try:
subprocess.check_call(["virtualenv", env_path])
except subprocess.CalledProcessError:
subprocess.check_call([sys.executable, "-m", "virtualenv", env_path])
print("Installing `jinja2` into bootstrap environment...")
subprocess.check_call([join(bin_path, "pip"), "install", "jinja2"])
python_executable = join(bin_path, "python")
if not os.path.samefile(python_executable, sys.executable):
print("Re-executing with: {0}".format(python_executable))
os.execv(python_executable, [python_executable, __file__])
import jinja2
import subprocess
jinja = jinja2.Environment(
loader=jinja2.FileSystemLoader(join(base_path, "ci", "templates")),
trim_blocks=True,
lstrip_blocks=True,
keep_trailing_newline=True
)
tox_environments = [
line.strip()
# WARNING: 'tox' must be installed globally or in the project's virtualenv
for line in subprocess.check_output(['tox', '--listenvs'], universal_newlines=True).splitlines()
]
tox_environments = [line for line in tox_environments if line not in ['clean', 'report', 'docs', 'check']]
for name in os.listdir(join("ci", "templates")):
with open(join(base_path, name), "w") as fh:
fh.write(jinja.get_template(name).render(tox_environments=tox_environments))
print("Wrote {}".format(name))
print("DONE.")
| mit | 5,956,504,451,291,047,000 | 35.62069 | 110 | 0.636064 | false | 3.75265 | false | false | false |
wiredrive/wtframework | wtframework/wtf/testobjects/tests/test_watched_test_case.py | 1 | 5126 |
##########################################################################
# This file is part of WTFramework.
#
# WTFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WTFramework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WTFramework. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
from mockito import mock
from wtframework.wtf.testobjects.testcase import WatchedTestCase
import unittest2
__wtf_test_logger_var = []
class TestWatchedTestCaseWatchers(WatchedTestCase):
def __init__(self, *args, **kwargs):
super(TestWatchedTestCaseWatchers, self).__init__(*args, **kwargs)
self.watcher = LoggerTestWatcher()
self._register_watcher(self.watcher)
def setUp(self):
self.watcher.log.append("setUp")
return
def tearDown(self):
self.watcher.log.append("tearDown")
return
def test_aaa_something(self):
"Dummy test to set things up."
self.watcher.log.append("test_aaa")
return
def test_bbb_something(self):
"2nd dummp test to set things up"
self.watcher.log.append("test_bbb")
return
def test_zzz_test_our_real_event_sequence(self):
"Check the dummy test's sequence of events."
# Keep in mind we're running a test after another test.
expected = ['before_setup', # generated by test_aaa
'setUp',
'before_test',
'test_aaa',
'on_test_pass',
'after_test',
'tearDown',
'after_teardown',
'before_setup', # generated by test_bbb
'setUp',
'before_test',
'test_bbb',
'on_test_pass',
'after_test',
'tearDown',
'after_teardown',
'before_setup', # generated by test_zzz
'setUp',
'before_test']
print self.get_log()
self.assertEqual(expected,
self.get_log())
class TestWatchedTestCase(unittest2.TestCase):
def test_passed_test_case_runs_setup_and_cleanup(self):
mockresult = mock(unittest2.TestResult)
tc = TestCaseStub(methodName="runTest")
tc.run(mockresult)
self.assertTrue(tc.setupRan)
self.assertTrue(tc.tearDownRan)
def test_failed_setup_does_not_run_test_and_runs_cleanup(self):
mockresult = mock(unittest2.TestResult)
tc = TestCaseStub(methodName="runTest")
tc.failSetup = True
tc.run(mockresult)
self.assertTrue(tc.tearDownRan)
self.assertFalse(tc.testRan)
def test_failed_test_does_not_complete_and_runs_cleanup(self):
mockresult = mock(unittest2.TestResult)
tc = TestCaseStub(methodName="runTest")
tc.failTest = True
tc.run(mockresult)
self.assertTrue(tc.tearDownRan)
self.assertTrue(tc.testRan)
self.assertFalse(tc.testPassed)
class TestCaseStub(WatchedTestCase):
setupRan = False
testRan = False
testPassed = False
tearDownRan = False
failSetup = False
failTest = False
def setUp(self):
self.setupRan = True
if self.failSetup:
raise RuntimeError("test error")
def tearDown(self):
self.tearDownRan = True
def runTest(self):
self.testRan = True
if self.failTest:
raise RuntimeError("Failed test")
self.testPassed = True
class LoggerTestWatcher(object):
"This test watcher just logs actions to a list to verify order of events."
log = []
def before_setup(self, test_case, test_result):
print "LoggerTestWatcher before_setup"
self.log.append("before_setup")
def before_test(self, test_case, test_result):
self.log.append("before_test")
def after_test(self, test_case, test_result):
self.log.append("after_test")
def after_teardown(self, test_case, test_result):
self.log.append("after_teardown")
def on_test_failure(self, test_case, test_result, exception):
self.log.append("on_test_failure")
def on_test_error(self, test_case, test_result, exception):
self.log.append("on_test_error")
def on_test_pass(self, test_case, test_result):
self.log.append("on_test_pass")
def get_log(self):
return self.log
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest2.main()
| gpl-3.0 | -9,188,379,825,369,977,000 | 29.331361 | 78 | 0.588958 | false | 4.045777 | true | false | false |
ljmanso/AGM2 | tools/agmdsr/src/genericworker.py | 1 | 3256 |
#
# Copyright (C) 2017 by YOUR NAME HERE
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
import sys, Ice, os
from PySide import *
ROBOCOMP = ''
try:
ROBOCOMP = os.environ['ROBOCOMP']
except KeyError:
print '$ROBOCOMP environment variable not set, using the default value /opt/robocomp'
ROBOCOMP = '/opt/robocomp'
preStr = "-I/opt/robocomp/interfaces/ -I"+ROBOCOMP+"/interfaces/ --all /opt/robocomp/interfaces/"
Ice.loadSlice(preStr+"CommonBehavior.ice")
import RoboCompCommonBehavior
additionalPathStr = ''
icePaths = []
try:
SLICE_PATH = os.environ['SLICE_PATH'].split(':')
for p in SLICE_PATH:
icePaths.append(p)
additionalPathStr += ' -I' + p + ' '
icePaths.append('/opt/robocomp/interfaces')
except:
print 'SLICE_PATH environment variable was not exported. Using only the default paths'
pass
ice_AGM2 = False
for p in icePaths:
print 'Trying', p, 'to load AGM2.ice'
if os.path.isfile(p+'/AGM2.ice'):
print 'Using', p, 'to load AGM2.ice'
preStr = "-I/opt/robocomp/interfaces/ -I"+ROBOCOMP+"/interfaces/ " + additionalPathStr + " --all "+p+'/'
wholeStr = preStr+"AGM2.ice"
Ice.loadSlice(wholeStr)
ice_AGM2 = True
break
if not ice_AGM2:
print 'Couln\'t load AGM2'
sys.exit(-1)
from RoboCompAGM2 import *
from agmdsrserviceI import *
import rospy
from std_msgs.msg import *
try:
from RoboCompAGM2ROS.msg import *
except:
print "couldn't load msg"
from RoboCompAGM2ROS.srv import *
#class for rosPublisher
class PublisherAGMDSRTopic():
def __init__(self):
self.pub_structuralChange = rospy.Publisher("structuralChange", World, queue_size=1000)
self.pub_edgesUpdated = rospy.Publisher("edgesUpdated", EdgeSequence, queue_size=1000)
self.pub_symbolsUpdated = rospy.Publisher("symbolsUpdated", NodeSequence, queue_size=1000)
def structuralChange(self, w):
self.pub_structuralChange.publish(w)
def edgesUpdated(self, modification):
self.pub_edgesUpdated.publish(modification)
def symbolsUpdated(self, modification):
self.pub_symbolsUpdated.publish(modification)
class GenericWorker(QtCore.QObject):
kill = QtCore.Signal()
def __init__(self, mprx):
super(GenericWorker, self).__init__()
self.agmdsrtopic_proxy = mprx["AGMDSRTopicPub"]
self.agmdsrtopic_rosproxy = PublisherAGMDSRTopic()
self.mutex = QtCore.QMutex(QtCore.QMutex.Recursive)
self.Period = 30
self.timer = QtCore.QTimer(self)
@QtCore.Slot()
def killYourSelf(self):
rDebug("Killing myself")
self.kill.emit()
# \brief Change compute period
# @param per Period in ms
@QtCore.Slot(int)
def setPeriod(self, p):
print "Period changed", p
Period = p
timer.start(Period)
| gpl-3.0 | 1,309,409,873,425,347,300 | 28.333333 | 106 | 0.727273 | false | 3.014815 | false | false | false |
raphaelvalentin/Utils | ngspice/syntax/nport.py | 1 | 5197 |
from ngspice.syntax import *
from functions.science import flatten
from rawdata import touchstone
from interpolate import interp1d
__all__ = ['Nport']
class VCVSx(Netlist):
__name__ = "vcvs"
__type__ = "instance"
__indent__ = ""
def __init__(self, name='E1', nodes=('in', 'out', 'sensp', 'sensm'), gain=complex(0,0), freq=1e9 ):
self.name = name
self.nodes = nodes
self.gain = gain
self.freq = freq
n1, n2, n3 = newnode(), newnode(), newnode()
self.e1, self.e2, self.l1 = newname('e'), newname('e'), newname('l')
self.append( VCVS(name=self.e1, nodes=(nodes[0], n1, nodes[2], nodes[3]), gain=gain.real) )
self.append( VCVS(name=self.e2, nodes=(n1, nodes[1], n2, '0'), gain=abs(gain)) )
self.append( VCCS(name=newname('g'), nodes=('0', n2, nodes[2], nodes[3]), gain=1.0) )
self.append( Inductor(name=self.l1, nodes=(n2, '0'), l=gain.imag/(2.0*pi*freq)/abs(gain) ) )
def alter(self, gain, freq):
netlist = Netlist()
netlist.append( Alter(self.e1, gain=gain.real) )
netlist.append( Alter(self.e2, gain=abs(gain)) )
netlist.append( Alter(self.l1, gain.imag/(2.0*pi*freq)/abs(gain)) )
return netlist
class OnePort(Netlist):
__name__ = "oneport"
__type__ = "instance"
def __init__(self, name='oneport1', nodes=('1', '0'), gain=complex(0, 0), freq=1e9):
self.name = name
self.nodes = nodes
n1, n2 = newnode(), newnode()
self.append( Resistor(name=newname('r'), nodes=(nodes[0], n1), r=-50) )
self.append( Resistor(name=newname('r'), nodes=(n1, n2), r=100) )
self.append( VCVS(nodes=(n2, nodes[1], n1, nodes[1]), gain=gain, freq=freq) )
def alter(self, gain):
return self[2].alter(gain)
class Nport(Netlist):
# http://analog-innovation.com/CreateS-ParameterSubcircuitsforSpice.pdf
__name__ = "nport"
__type__ = "instance"
def __init__(self, name='nport1', nodes=('1', '0', '2', '0'), file="", freq=None):
self.name = name
self.nodes = nodes
self.file = file
self.data = touchstone.snp(self.file).read()
x = []
for i in xrange(len(nodes)/2):
row = []
for j in xrange(len(nodes)/2):
if freq:
freqs = self.data['freq']
sij = self.data['s%d%d'%(i+1,j+1)]
xsij = interp1d(freqs, sij)(freq)
row.append( xsij )
else:
row.append( self.data['s%d%d'%(i+1,j+1)][0] )
freq = self.data['freq'][0]
x.append(row)
self._ivcvs = []
n1 = [newnode() for i in xrange(len(nodes)/2)]
for i in xrange(len(nodes)/2):
self.append( Resistor(name=newname('r'), nodes=(nodes[i*2], n1[i]), r=-50) )
n2 = [newnode() for _i in xrange(len(nodes)/2)] + [nodes[1]]
self.append( Resistor(name=newname('r'), nodes=(n1[i], n2[0]), r=100) )
for j in xrange(len(nodes)/2):
self.append( VCVSx(nodes=(n2[j], n2[j+1], n1[j], nodes[-1]), gain=x[i][j], freq=freq) )
self._ivcvs.append( len(self)-1 )
def alter(self, freq):
x = []
for i in xrange(len(self.nodes)/2):
row = []
for j in xrange(len(self.nodes)/2):
freqs = self.data['freq']
sij = self.data['s%d%d'%(i+1,j+1)]
xsij = interp1d(freqs, sij)(freq)
row.append( xsij )
x.append(row)
y = list(flatten(x))
netlist = Netlist()
for i, k in enumerate(self._ivcvs):
netlist.append( self[k].alter(y[i], freq) )
return netlist
class Nport1(Netlist):
# http://analog-innovation.com/CreateS-ParameterSubcircuitsforSpice.pdf
__name__ = "nport"
__type__ = "instance"
def __init__(self, name='nport1', nodes=('1', '0', '2', '0'), file="", freq=None):
self.name = name
self.nodes = nodes
self.file = file
self.data = touchstone.snp(self.file).read()
x = []
if freq:
for i in xrange(len(nodes)/2):
row = []
for j in xrange(len(nodes)/2):
freqs = self.data['freq']
sij = self.data['s%d%d'%(i+1,j+1)]
xsij = interp1d(freqs, sij)(freq)
row.append( xsij )
x.append(row)
else:
for i in xrange(len(nodes)/2):
row = []
for j in xrange(len(nodes)/2):
row.append( self.data['s%d%d'%(i+1,j+1)][0] )
freq = self.data['freq'][0]
x.append(row)
self._ivcvs = []
n1 = [newnode() for i in xrange(len(nodes)/2)]
for i in xrange(len(nodes)/2):
self.append( Resistor(name=newname('r'), nodes=(nodes[i*2], n1[i]), r=-50) )
n2 = [newnode() for _i in xrange(len(nodes)/2)] + [nodes[1]]
self.append( Resistor(name=newname('r'), nodes=(n1[i], n2[0]), r=100) )
for j in xrange(len(nodes)/2):
self.append( VCVSx(nodes=(n2[j], n2[j+1], n1[j], nodes[-1]), gain=x[i][j], freq=freq) )
self._ivcvs.append( len(self)-1 )
def alter(self, freq):
y = []
n = len(self.nodes)/2
freqs = self.data['freq']
for i in xrange(n):
for j in xrange(n):
sij = self.data['s%d%d'%(i+1,j+1)]
xsij = interp1d(freqs, sij)(freq)
y.append( xsij )
netlist = Netlist()
for k, yi in zip(self._ivcvs, y):
netlist.extend( self[k].alter(yi, freq) )
return netlist
| gpl-2.0 | 2,556,812,261,765,248,500 | 34.59589 | 103 | 0.557629 | false | 2.655595 | false | false | false |
kylef/pyppp | pyppp/django/forms.py | 1 | 3810 |
from django import forms
from django.conf import settings
from django.http import HttpResponseRedirect
from django.views.decorators.cache import never_cache
from django.contrib.auth import authenticate, REDIRECT_FIELD_NAME
from django.contrib.formtools.wizard import FormWizard
from pyppp.django import login
from pyppp.django.models import UserPPP
class UserFormBase(forms.Form):
def __init__(self, *args, **kwargs):
self.user_cache = None
super(UserFormBase, self).__init__(*args, **kwargs)
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
class AuthenticationForm(UserFormBase):
username = forms.CharField(max_length=30)
password = forms.CharField(widget=forms.PasswordInput)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
self.user_cache = authenticate(username=username, password=password)
if self.user_cache is None:
raise forms.ValidationError('Please enter a correct username and password. Note that both fields are case-sensitive.')
elif not self.user_cache.is_active:
raise forms.ValidationError('This account is inactive')
return self.cleaned_data
class PasscodeForm(UserFormBase):
username = forms.CharField(max_length=30)
passcode = forms.CharField(max_length=4)
card = forms.CharField(max_length=8)
code = forms.CharField(max_length=8)
def __init__(self, *args, **kwargs):
super(PasscodeForm, self).__init__(*args, **kwargs)
self.fields['username'].widget.attrs['readonly'] = True
self.fields['card'].widget.attrs['readonly'] = True
self.fields['code'].widget.attrs['readonly'] = True
def clean(self):
if self.user_cache is not None:
return self.cleaned_data
username = self.cleaned_data.get('username')
passcode = self.cleaned_data.get('passcode')
if username and passcode:
self.user_cache = authenticate(username=username, passcode=passcode)
if self.user_cache is None:
raise forms.ValidationError('Incorrect passcode.')
return self.cleaned_data
class LoginWizard(FormWizard):
def parse_params(self, request, *args, **kwargs):
current_step = self.determine_step(request, *args, **kwargs)
if request.method == 'POST' and current_step == 0:
request.session.set_test_cookie()
form = self.get_form(current_step, request.POST)
if form.is_valid():
ppp, created = UserPPP.objects.get_or_create(user=form.user_cache)
passcode_info = ppp.get_current_sequence_info()
self.initial[(current_step + 1)] = {
'username': form.cleaned_data.get('username'),
'card': passcode_info['card'],
'code': '%s%s' % (passcode_info['row'], passcode_info['column'])
}
def get_template(self, step):
return 'pyppp/form.html'
def done(self, request, form_list):
if not request.session.test_cookie_worked():
print "Your Web browser doesn't appear to have cookies enabled. Cookies are required for logging in."
redirect_to = request.REQUEST.get(REDIRECT_FIELD_NAME, '')
if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
login(request, form_list[1].get_user())
return HttpResponseRedirect(redirect_to)
| bsd-2-clause | 4,494,156,654,408,472,000 | 38.6875 | 134 | 0.623622 | false | 4.127844 | false | false | false |
google/nerfactor | nerfactor/models/base.py | 1 | 5484 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import losses
from networks import base as basenet
from util import logging as logutil
logger = logutil.Logger(loggee="models/base")
class Model(tf.keras.Model):
"""Uses only the parent's trackability and nothing else.
"""
def __init__(self, config, debug=False):
super().__init__()
self.config = config
self.debug = debug
if debug:
logger.warn("Model in debug mode; behavior may be different")
self.net = {'main': basenet.Network()} # NOTE: insert trainable networks
# of your model into this dictionary, values of which will be registered
# as trainable
self.trainable_registered = False # NOTE: before training, call
# register_trainable() to register trainable parameters (which lie in
# self.net)
# Initialize loss functions and parse weights
self.wloss = self._init_loss() # NOTE: list of weight and
# (initialized) loss function pairs
def _init_loss(self):
wloss = []
loss_str = self.config.get('DEFAULT', 'loss')
for x in loss_str.split(','):
loss_name, weight = self._parse_loss_and_weight(x)
if loss_name == 'lpips':
loss = losses.LPIPS(per_ch=False)
elif loss_name == 'elpips':
bs = self.config.getint('DEFAULT', 'bs')
loss = losses.ELPIPS(bs)
elif loss_name == 'l1':
loss = losses.L1()
elif loss_name == 'l2':
loss = losses.L2()
elif loss_name == 'ssim':
loss = losses.SSIM(1 - 0)
else:
raise NotImplementedError(loss_name)
wloss.append((weight, loss))
return wloss
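# Illustrative config value (comment added here, not in the original file):
# with `loss = 100lpips,l2` in the [DEFAULT] section, _init_loss returns
# [(100.0, losses.LPIPS(per_ch=False)), (1.0, losses.L2())].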
@staticmethod
def _parse_loss_and_weight(weight_loss_str):
"""Handles strings like '1e+2lpips' or 'l1,10barron'.
"""
# Start from the back because looking for the longest string that
# can be converted to a float
for i in range(len(weight_loss_str), -1, -1):
try:
weight = float(weight_loss_str[:i])
except ValueError:
continue
loss_name = weight_loss_str[i:]
return loss_name, weight
# Weight not specified
return weight_loss_str, 1.
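# Behavior sketch for the parser above (comments added, not in the original
# file): the longest float-convertible prefix becomes the weight.
#
#   _parse_loss_and_weight('1e+2lpips') -> ('lpips', 100.0)
#   _parse_loss_and_weight('l1')        -> ('l1', 1.0)   # weight defaults to 1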
def register_trainable(self):
"""Trackable objects (such as Keras sequentials and layers) must be
directly under `self` to be registered to `trainable_variables`, so
this function simply adds aliases directly under `self` to all nets'
trainable variables.
"""
registered = []
pref = 'net_'
for net_name, net in self.net.items():
attr_name = pref + net_name
assert attr_name.isidentifier(), (
"Prepending '{pref}' to your network name '{net}' doesn't "
"make a valid identifier; change your network name").format(
pref=pref, net=net_name)
for layer_i, layer in enumerate(net.layers):
if layer.trainable:
attr_name_full = attr_name + '_layer%d' % layer_i
assert not hasattr(self, attr_name_full), (
"Can't register `{}` because it is already an "
"attribute").format(attr_name_full)
setattr(self, attr_name_full, layer)
registered.append(attr_name_full)
logger.info("Trainable layers registered:\n\t%s", registered)
self.trainable_registered = True
@staticmethod
def _validate_mode(mode):
allowed_modes = ('train', 'vali', 'test')
if mode not in allowed_modes:
raise ValueError(mode)
def call(self, batch, mode='train'):
"""
Returns:
tuple:
- **pred**
- **gt**
- **loss_kwargs** (*dict*) -- Keyword arguments for loss
computation.
- **to_vis** (*dict*) -- Tensors to visualize.
"""
raise NotImplementedError
def compute_loss(self, pred, gt, **kwargs):
"""
Returns:
tf.Tensor: Loss.
"""
raise NotImplementedError
def vis_batch(self, data_dict, outdir, mode='train', dump_raw_to=None):
raise NotImplementedError
def compile_batch_vis(self, batch_vis_dirs, outpref, mode='train'):
"""Compiles batch visualizations into a consolidated view.
Returns:
str: Convinient link to your consolidated view, which will be
logged into TensorBoard. So you should add proper file extension
(and maybe also file viewer prefix), returning something like
``'http://your.file.viewer/' + outpref + '.html'``.
"""
raise NotImplementedError
| apache-2.0 | -6,663,748,743,042,805,000 | 37.34965 | 80 | 0.578957 | false | 4.335178 | false | false | false |
dials/dials | command_line/apply_mask.py | 1 | 3188 |
import pickle
from dxtbx.format.image import ImageBool
from iotbx.phil import parse
import dials.util
help_message = """
This program augments a experiments JSON file with one or more masks specified by the
user. Its only function is to input the mask file paths to the experiments JSON file,
but means that the user does not have to edit the experiments file by hand.
Crucially, the mask files must be provided in the same order as their corresponding
imagesets (sequences) appear in the experiments JSON file.
Examples::
dials.apply_mask models.expt input.mask=pixels.mask
dials.apply_mask models.expt input.mask=pixels1.mask input.mask=pixels2.mask
"""
phil_scope = parse(
"""
input {
mask = None
.multiple = True
.type = str
.help = "The mask filenames, one mask per imageset"
}
output {
experiments = masked.expt
.type = str
.help = "Name of output experiments file"
}
""",
process_includes=True,
)
class Script:
"""A class to encapsulate the script."""
def __init__(self):
"""Initialise the script."""
from dials.util.options import OptionParser
# Create the parser
usage = "dials.apply_mask models.expt input.mask=pixels.mask"
self.parser = OptionParser(
usage=usage, epilog=help_message, phil=phil_scope, read_experiments=True
)
def run(self, args=None):
"""Run the script."""
from dials.util import Sorry
from dials.util.options import flatten_experiments
# Parse the command line arguments
params, options = self.parser.parse_args(args, show_diff_phil=True)
experiments = flatten_experiments(params.input.experiments)
# Check that an experiment list and at least one mask file have been provided
if not (experiments and params.input.mask):
self.parser.print_help()
return
# Check number of experiments
n_expts = len(experiments)
n_masks = len(params.input.mask)
if n_expts != n_masks:
raise Sorry(
"The number of masks provided must match the number of imagesets "
"(sequences).\n"
"You have provided an experiment list containing {} imageset(s).\n"
"You have provided {} mask file(s).".format(n_expts, n_masks)
)
# Get the imageset
imagesets = experiments.imagesets()
for i, imageset in enumerate(imagesets):
# Set the lookup
with open(params.input.mask[i], "rb") as f:
mask = pickle.load(f, encoding="bytes")
imageset.external_lookup.mask.filename = params.input.mask[i]
imageset.external_lookup.mask.data = ImageBool(mask)
# Dump the experiments
print(f"Writing experiments to {params.output.experiments}")
experiments.as_file(filename=params.output.experiments)
@dials.util.show_mail_handle_errors()
def run(args=None):
script = Script()
script.run(args)
if __name__ == "__main__":
run()
| bsd-3-clause | 306,330,743,678,722,560 | 29.951456 | 86 | 0.620452 | false | 4.015113 | false | false | false |
WGBH/django-textplusstuff | textplusstuff/datastructures.py | 1 | 5466 |
from __future__ import unicode_literals
import json
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from .parser import (
MarkdownFlavoredTextNode,
ModelStuffNode,
TextPlusStuffLexer,
TextPlusStuffParser
)
class TextPlusStuff(object):
def __init__(self, raw_text, field=None):
raw_text = raw_text or ""
if not isinstance(raw_text, str):
raise UnicodeError(
(
"TextPlusStuff can only be initialized with either "
"unicode or UTF-8 strings."
)
)
else:
raw_text_processed = force_text(raw_text, errors='replace')
self.raw_text = raw_text_processed
# Initialize lexer
lexer = TextPlusStuffLexer(raw_val=raw_text_processed)
# Use the lexer to create tokens
tokens = lexer.tokenize()
# Pass tokens to parser and parse
self.nodelist = TextPlusStuffParser(tokens=tokens).parse()
def render(self, render_markdown_as, **kwargs):
"""
Renders a TextPlusStuffField
`render_markdown_as`: The format that markdown-flavored text should
be transformed in. Options: `html`, `markdown`, `plain_text`
"""
final_output = ""
include_content_nodes = kwargs.pop('include_content_nodes', True)
extra_context = kwargs.pop('extra_context', None)
for node in self.nodelist:
if isinstance(node, MarkdownFlavoredTextNode):
final_output += node.render(render_as=render_markdown_as)
elif isinstance(node, ModelStuffNode):
if include_content_nodes is False:
pass
else:
final_output += node.render(extra_context=extra_context)
return final_output
def as_html(self, **kwargs):
"""
Renders a TextPlusStuffField as HTML.
Optional keyword arguments:
* `include_content_nodes`: Boolean signifying whether or not to render
content nodes (i.e. ModelStuff tokens).
Defaults to `True`.
"""
return mark_safe(
self.render(
'html',
include_content_nodes=kwargs.pop(
'include_content_nodes', True
),
extra_context=kwargs.pop('extra_context', None)
)
)
def as_json(self, **kwargs):
"""
Renders a TextPlusStuffField as a JSON object.
* `render_markdown_as`: The format that markdown-flavored text should
be transformed in. Options: `html` (default), `markdown`, `plain_text`.
"""
final_output_as_html = ""
final_output_as_markdown = ""
include_content_nodes = kwargs.pop('include_content_nodes', True)
extra_context = kwargs.pop('extra_context', None)
convert_to_json_string = kwargs.pop('convert_to_json_string', False)
model_stuff_node_counter = 0
model_stuff_node_context_list = []
for node in self.nodelist:
if isinstance(node, MarkdownFlavoredTextNode):
final_output_as_html += node.render(render_as='html')
final_output_as_markdown += node.render(render_as='markdown')
elif isinstance(node, ModelStuffNode):
if include_content_nodes is True:
final_output_as_markdown += "{{{{ NODE__{index} }}}}"\
.format(
index=model_stuff_node_counter
)
final_output_as_html += (
'<span data-textplusstuff-contentnode-arrayindex='
'"{index}"></span>'
).format(index=model_stuff_node_counter)
model_stuff_node_context_list.append({
'model': '{}:{}'.format(
node.node_mapping.get('content_type__app_label'),
node.node_mapping.get('content_type__model')
),
'rendition': node.get_rendition().short_name,
'context': node.get_node_context(
extra_context=extra_context
)
})
model_stuff_node_counter += 1
dict_to_return = {
'text_as_markdown': final_output_as_markdown,
'text_as_html': final_output_as_html,
'content_nodes': model_stuff_node_context_list
}
to_return = dict_to_return
if convert_to_json_string is True:
to_return = json.dumps(dict_to_return)
return to_return
def as_plaintext(self, **kwargs):
"""
Renders a TextPlusStuffField as plain text (all markdown
formatting removed).
Content nodes (i.e. ModelStuff tokens) will not be rendered.
"""
return self.render(
'plain_text',
include_content_nodes=False
)
def as_markdown(self, **kwargs):
"""
Renders a TextPlusStuffField as markdown.
Content nodes (i.e. ModelStuff tokens) will not be rendered.
"""
return self.render(
'markdown',
include_content_nodes=False
)
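# Illustrative usage sketch (comments added, not in the original module); the
# input string is hypothetical:
#
#   tps = TextPlusStuff("Some **markdown** text")
#   tps.as_plaintext()  # formatting stripped, content nodes never rendered
#   tps.as_html()       # HTML, content nodes included by default
#   tps.as_json()       # dict with 'text_as_markdown', 'text_as_html',
#                       # and 'content_nodes' keys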
__all__ = ('TextPlusStuff',)
| mit | 2,719,036,316,729,344,000 | 35.932432 | 79 | 0.5397 | false | 4.3728 | false | false | false |
prophile/jacquard | jacquard/directory/base.py | 1 | 1451 |
"""Base class for directory implementations."""
import abc
import collections
UserEntry = collections.namedtuple("UserEntry", "id join_date tags")
UserEntry.__doc__ = """
Description of attributes of a single user.
Internally this is a `collections.namedtuple`.
"""
UserEntry.id.__doc__ = """String user ID."""
UserEntry.join_date.__doc__ = """Date at which the user is considered to have joined."""
UserEntry.tags.__doc__ = """Container of tags which apply to this user, defined by the directory."""
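# Illustrative shape of an entry (comment added, not in the original module);
# the values are hypothetical:
#
#   UserEntry(id="u123", join_date=datetime.date(2017, 1, 1), tags=("beta",))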
class Directory(metaclass=abc.ABCMeta):
"""User directory."""
@abc.abstractmethod
def __init__(self, **kwargs):
"""
Standard constructor.
Keyword arguments are taken from the `directory` section of config
files, and appear as strings. Useful for specifying connection URLs
etc.
"""
raise NotImplementedError
@classmethod
def from_configuration(cls, config, options):
"""
Construct from a Config object, and a dictionary of options.
By default this does not use the `config` and just defers to
`__init__` passing the options as kwargs.
"""
return cls(**options)
@abc.abstractmethod
def lookup(self, user_id):
"""
Look up user by ID.
For missing users this must return None, otherwise it must return a
corresponding `UserEntry`.
"""
raise NotImplementedError
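# Illustrative only (class added as a sketch, not part of the original
# module): a minimal in-memory implementation of the interface above,
# e.g. for tests.
class InMemoryDirectory(Directory):
    """Directory backed by a plain dict."""

    def __init__(self, **kwargs):
        # Options from the `directory` config section arrive as kwargs;
        # this sketch ignores them.
        self._users = {}

    def lookup(self, user_id):
        # None for missing users, per the contract documented above.
        return self._users.get(user_id)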
| mit | -3,654,069,613,207,161,300 | 27.45098 | 100 | 0.645762 | false | 4.635783 | false | false | false |
UfSoft/trac-google-search | tracext/google/search/__init__.py | 1 | 3529 |
# -*- coding: utf-8 -*-
# vim: sw=4 ts=4 fenc=utf-8 et
# ==============================================================================
# Copyright © 2008 UfSoft.org - Pedro Algarvio <[email protected]>
#
# Please view LICENSE for additional licensing information.
# ==============================================================================
__version__ = '0.1.3'
__author__ = 'Pedro Algarvio'
__email__ = '[email protected]'
__packagename__ = 'TracGoogleSearch'
__license__ = 'BSD'
__url__ = 'http://google.ufsoft.org'
__summary__ = 'Google Adsense Search Plugin for Trac'
import pkg_resources
from trac.config import Option, BoolOption, IntOption
from trac.core import Component, implements
from trac.env import IEnvironmentSetupParticipant
from trac.web.chrome import ITemplateProvider
# ==============================================================================
# Google Search Config
# ==============================================================================
class GoogleSearchConfig(Component):
google_search_active = BoolOption(
'google.search', 'google_search_active', True,
"""Enable Google Adsense search."""
)
search_form_id = Option('google.search', 'search_form_id', 'search',
"""The form ID where the adsesnse for search code should be placed.
The default is "search" which is trac's mini search form. Content will
be replaced"""
)
search_form_text_input_width = IntOption(
'google.search', 'search_form_text_input_width', 31,
"""
Initial width(number of characters) of the search string text input.
"""
)
search_form_forid = Option('google.search', 'search_form_forid', '',
"""This is the value of the hidden input with the name "cof" that
Google gives on their code, usualy something like "FORID:n" where n
is an integer value. This cannot be empty."""
)
search_form_client_id = Option('google.search', 'search_form_client_id', '',
"""This is the value of the hidden input with the name "cx" that
Google gives on their code, usualy something like
"partner-pub-0000000000000000:0aaa0aaa00a" (this is just an example).
This cannot be empty."""
)
search_iframe_initial_width = IntOption(
'google.search', 'search_iframe_initial_width', 800,
"""
Initial width of the IFRAME that Google returns. It will then increase
the available width of the div by the ID "content".
This value should not be that bigger because resizing only occurs
correctly if initial size is smaller than the available width.
"""
)
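# Illustrative trac.ini fragment matching the options above (comment added,
# not in the original file; the FORID and client id are placeholders in the
# style of the option docstrings):
#
#   [google.search]
#   google_search_active = true
#   search_form_forid = FORID:9
#   search_form_client_id = partner-pub-0000000000000000:0aaa0aaa00a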
# ==============================================================================
# Google Search Resources
# ==============================================================================
class GoogleSearchResources(Component):
implements(ITemplateProvider)
# ITemplateProvider methods
def get_htdocs_dirs(self):
"""Return the absolute path of a directory containing additional
static resources (such as images, style sheets, etc).
"""
yield 'googlesearch', pkg_resources.resource_filename(__name__,
'htdocs')
def get_templates_dirs(self):
"""Return the absolute path of the directory containing the provided
Genshi templates.
"""
yield pkg_resources.resource_filename(__name__, 'templates')
| bsd-3-clause | 4,980,436,316,728,324,000 | 42.02439 | 80 | 0.55924 | false | 4.55814 | false | false | false |
pombredanne/dask | dask/tests/test_async.py | 1 | 5236 |
from __future__ import absolute_import, division, print_function
from operator import add
from copy import deepcopy
import dask
import pytest
from dask.async import *
fib_dask = {'f0': 0, 'f1': 1, 'f2': 1, 'f3': 2, 'f4': 3, 'f5': 5, 'f6': 8}
def test_start_state():
dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
result = start_state_from_dask(dsk)
expected = {'cache': {'x': 1, 'y': 2},
'dependencies': {'w': set(['y', 'z']),
'x': set([]),
'y': set([]),
'z': set(['x'])},
'dependents': {'w': set([]),
'x': set(['z']),
'y': set(['w']),
'z': set(['w'])},
'finished': set([]),
'released': set([]),
'running': set([]),
'ready': ['z'],
'waiting': {'w': set(['z'])},
'waiting_data': {'x': set(['z']),
'y': set(['w']),
'z': set(['w'])}}
assert result == expected
def test_start_state_looks_at_cache():
dsk = {'b': (inc, 'a')}
cache = {'a': 1}
result = start_state_from_dask(dsk, cache)
assert result['dependencies']['b'] == set(['a'])
assert result['ready'] == ['b']
def test_start_state_with_redirects():
dsk = {'x': 1, 'y': 'x', 'z': (inc, 'y')}
result = start_state_from_dask(dsk)
assert result['cache'] == {'x': 1}
def test_start_state_with_independent_but_runnable_tasks():
assert start_state_from_dask({'x': (inc, 1)})['ready'] == ['x']
def test_finish_task():
dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
sortkey = order(dsk).get
state = start_state_from_dask(dsk)
state['ready'].remove('z')
state['running'] = set(['z', 'other-task'])
task = 'z'
result = 2
oldstate = deepcopy(state)
state['cache']['z'] = result
finish_task(dsk, task, state, set(), sortkey)
assert state == {
'cache': {'y': 2, 'z': 2},
'dependencies': {'w': set(['y', 'z']),
'x': set([]),
'y': set([]),
'z': set(['x'])},
'finished': set(['z']),
'released': set(['x']),
'running': set(['other-task']),
'dependents': {'w': set([]),
'x': set(['z']),
'y': set(['w']),
'z': set(['w'])},
'ready': ['w'],
'waiting': {},
'waiting_data': {'y': set(['w']),
'z': set(['w'])}}
def test_get():
dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
assert get_sync(dsk, 'w') == 4
assert get_sync(dsk, ['w', 'z']) == (4, 2)
def test_nested_get():
dsk = {'x': 1, 'y': 2, 'a': (add, 'x', 'y'), 'b': (sum, ['x', 'y'])}
assert get_sync(dsk, ['a', 'b']) == (3, 3)
def test_cache_options():
try:
from chest import Chest
except ImportError:
return
cache = Chest()
def inc2(x):
assert 'y' in cache
return x + 1
with dask.set_options(cache=cache):
get_sync({'x': (inc2, 'y'), 'y': 1}, 'x')
def test_sort_key():
L = ['x', ('x', 1), ('z', 0), ('x', 0)]
assert sorted(L, key=sortkey) == ['x', ('x', 0), ('x', 1), ('z', 0)]
def test_callback():
f = lambda x: x + 1
dsk = {'a': (f, 1)}
from dask.threaded import get
def start_callback(key, d, state):
assert key == 'a' or key is None
assert d == dsk
assert isinstance(state, dict)
def end_callback(key, value, d, state, worker_id):
assert key == 'a' or key is None
assert value == 2 or value is None
assert d == dsk
assert isinstance(state, dict)
get(dsk, 'a', start_callback=start_callback, end_callback=end_callback)
def test_order_of_startstate():
dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b'),
'x': 1, 'y': (inc, 'x')}
result = start_state_from_dask(dsk)
assert result['ready'] == ['y', 'b']
dsk = {'x': 1, 'y': (inc, 'x'), 'z': (inc, 'y'),
'a': 1, 'b': (inc, 'a')}
result = start_state_from_dask(dsk)
assert result['ready'] == ['b', 'y']
def test_nonstandard_exceptions_propagate():
class MyException(Exception):
def __init__(self, a, b):
self.a = a
self.b = b
def __str__(self):
return "My Exception!"
def f():
raise MyException(1, 2)
from dask.threaded import get
try:
get({'x': (f,)}, 'x')
assert False
except MyException as e:
assert "My Exception!" in str(e)
assert "Traceback" in str(e)
assert 'a' in dir(e)
assert 'traceback' in dir(e)
assert e.exception.a == 1 and e.exception.b == 2
assert e.a == 1 and e.b == 2
def test_remote_exception():
e = TypeError("hello")
a = remote_exception(e, 'traceback')
b = remote_exception(e, 'traceback')
assert type(a) == type(b)
assert isinstance(a, TypeError)
assert 'hello' in str(a)
assert 'traceback' in str(a)
| bsd-3-clause | 5,824,641,980,801,946,000 | 27.456522 | 75 | 0.442704 | false | 3.230105 | true | false | false |
nagordon/mechpy | mechpy/composites.py | 1 | 71681 |
# coding: utf-8
'''
Module for composite material analysis
Hyer-Stress Analysis of Fiber-Reinforced Composite Materials
Herakovich-Mechanics of Fibrous Composites
Daniel-Engineering Mechanics of Composite Materials
Kollar-Mechanics of COmposite Structures
NASA-Basic Mechanics of Laminated Composites
https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19950009349.pdf
TODO:
* transverse shear stress reddy pg 136 or daniel pg 139
* include line loads (Qx,Qy) for combined loading
* calculate capability of panel based on margin
'''
#==============================================================================
# Import Modules
#==============================================================================
from __future__ import print_function, division
__author__ = 'Neal Gordon <[email protected]>'
__date__ = '2016-12-02'
__version__ = 0.1
from copy import copy
from numpy import pi, zeros, ones, linspace, arange, array, sin, cos, sqrt, pi
from numpy.linalg import solve, inv
#from scipy import linalg
import numpy as np
#np.set_printoptions(suppress=False,precision=2) # suppress scientific notation
np.set_printoptions(precision=3, linewidth=200)#, threshold=np.inf)
import scipy
from scipy.spatial import ConvexHull
#np.set_printoptions(formatter={'float': lambda x: "{:.2f}".format(x)})
import pandas as pd
import sympy as sp
from sympy import Function, dsolve, Eq, Derivative, symbols, pprint
from sympy.plotting import plot3d
#from sympy import cos, sin
#sp.init_printing(use_latex='mathjax')
#sp.init_printing(wrap_line=False, pretty_print=True)
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (8,5)
mpl.rcParams['font.size'] = 12
mpl.rcParams['legend.fontsize'] = 14
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot,figure,xlim,ylim,title,legend, \
grid, show, xlabel,ylabel, tight_layout
from mpl_toolkits.mplot3d import axes3d
# if using ipython console, turn off inline plotting
#mpl.use('Qt5Agg')
# inline plotting
from IPython import get_ipython
#get_ipython().magic('matplotlib inline')
###disable inline plotting
try:
get_ipython().magic('matplotlib')
except:
pass
from IPython.display import display
import os
plt.close('all')
#==============================================================================
# Functions
#==============================================================================
def import_matprops(mymaterial=['T300_5208','AL_7075']):
'''
import material properties
'''
matprops = pd.read_csv(os.path.join(os.path.dirname(__file__), "compositematerials.csv"), index_col=0)
if mymaterial==[] or mymaterial=='':
print(matprops.columns.tolist())
mat = matprops[mymaterial]
#mat.applymap(lambda x:np.float(x))
mat = mat.applymap(lambda x:pd.to_numeric(x, errors='ignore'))
return mat
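# Illustrative call (comment added, not in the original file), mirroring the
# attribute access used later in this module:
#
#   mat = import_matprops(['T300_5208'])
#   mat['T300_5208'].E1   # longitudinal modulus of that material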
def Sf(E1,E2,nu12,G12):
'''transversely isotropic compliance matrix. pg 58 herakovich'''
nu21 = E2*nu12/E1
S = array([[1/E1, -nu21/E2, 0],
[-nu12/E1, 1/E2, 0],
[0, 0, 1/G12]])
return S
def S6f(E1,E2,E3,nu12,nu13,nu23,G12,G13,G23):
'''
daniel pg 74
transversely isotropic compliance matrix.
For transversely isotropic
E2=E3, nu12=nu13,G12=G13,G23=E2/(2(1+nu23))
'''
S6 = array( [[ 1/E1, -nu12/E1, -nu12/E1, 0, 0, 0],
[-nu12/E1, 1/E2, -nu23/E2, 0, 0, 0],
[-nu12/E1, -nu23/E2, 1/E2, 0, 0, 0],
[ 0, 0, 0, 1/G23, 0, 0],
[ 0, 0, 0, 0, 1/G13, 0],
[ 0, 0, 0, 0, 0, 1/G12]])
return S6
def C6f(E1,E2,E3,nu12,nu13,nu23,G12,G13,G23):
'''
daniel pg 74
transversely isotropic stiffness matrix.
'''
C6 = inv(S6f(E1,E2,E3,nu12,nu13,nu23,G12,G13,G23))
return C6
def Qf(E1,E2,nu12,G12):
'''transversely isotropic reduced stiffness matrix. pg 58 herakovich
G12 = E1/(2*(1+nu12)) if isotropic'''
nu21 = E2*nu12/E1
Q = array([[E1/(1-nu12*nu21), E2*nu12/(1-nu12*nu21), 0],
[ E2*nu12/(1-nu12*nu21), E2/(1-nu12*nu21), 0],
[0, 0, G12]])
return Q
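# Consistency check (comment added, not in the original file): for plane
# stress the reduced stiffness is the inverse of the compliance, so with
# hypothetical lamina properties
#
#   np.allclose(Qf(20e6, 1.5e6, 0.3, 1e6), inv(Sf(20e6, 1.5e6, 0.3, 1e6)))
#
# evaluates to True.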
def T61(th):
'''Stress
th=ply angle in degrees
Voigt notation for stress transform. sigma1 = T1 @ sigmax
reddy pg 91'''
n = sin(th*pi/180)
m = cos(th*pi/180)
T1 = array( [[m**2, n**2, 0, 0, 0, 2*m*n],
[n**2, m**2, 0, 0, 0,-2*m*n],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, m,-n, 0],
[0, 0, 0, n, m, 0],
[-m*n, m*n, 0, 0, 0,(m**2-n**2)]])
return T1
def T62(th):
'''Strain
Voigt notation for strain transform. epsilon1 = T2 @ epsilonx
th=ply angle in degrees
reddy pg 91
'''
n = sin(th*pi/180)
m = cos(th*pi/180)
T2 = array( [[m**2, n**2, 0, 0, 0, m*n],
[n**2, m**2, 0, 0, 0,-m*n],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, m,-n, 0],
[0, 0, 0, n, m, 0],
[-2*m*n, 2*m*n, 0, 0, 0,(m**2-n**2)]])
return T2
def T1(th):
'''Stress Transform for Plane Stress
th=ply angle in degrees
Voigt notation for stress transform. sigma1 = T1 @ sigmax
recall T1(th)**-1 == T1(-th)'''
n = sin(th*pi/180)
m = cos(th*pi/180)
T1 = array( [[m**2, n**2, 2*m*n],
[n**2, m**2,-2*m*n],
[-m*n, m*n,(m**2-n**2)]])
return T1
def T2(th):
'''Strain Transform for Plane Stress
th=ply angle in degrees
Voigt notation for strain transform. epsilon1 = T2 @ epsilonx'''
n = sin(th*pi/180)
m = cos(th*pi/180)
T2 = array( [[m**2, n**2, m*n],
[n**2, m**2,-m*n],
[-2*m*n, 2*m*n, (m**2-n**2)]])
return T2
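# Numerical check of the identity noted in the T1 docstring (comment added,
# not in the original file); the same property holds for the strain transform:
#
#   np.allclose(inv(T1(30)), T1(-30))   # -> True
#   np.allclose(inv(T2(30)), T2(-30))   # -> True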
def T1s(th):
'''Symbolic Stress Transform for Plane Stress
th=ply angle in degrees
Voigt notation for stress transform. sigma1 = T1 @ sigmax
recall T1(th)**-1 == T1(-th)'''
n = sp.sin(th*sp.pi/180)
m = sp.cos(th*sp.pi/180)
T1 = sp.Matrix( [[m**2, n**2, 2*m*n],
[n**2, m**2,-2*m*n],
[-m*n, m*n,(m**2-n**2)]])
return T1
def T2s(th):
'''Symbolic Strain Transform for Plane Stress
th=ply angle in degrees
Voigt notation for strain transform. epsilon1 = T2 @ epsilonx'''
n = sp.sin(th*sp.pi/180)
m = sp.cos(th*sp.pi/180)
T2 = sp.Matrix( [[m**2, n**2, m*n],
[n**2, m**2,-m*n],
[-2*m*n, 2*m*n, (m**2-n**2)]])
return T2
def failure_envelope():
# failure envelopes
# max stress criteria
# 1 direction in first row
# 2 direction in second row
# failure strength in compression
#Fc = matrix([[-1250.0, -600.0],
# [-200.0, -120.0]]) # ksi
#
##failure strength in tension
#Ft = matrix([[1500, 1000]
# [50, 30]]) # ksi
#
##Failure strength in shear
#Fs = matrix( [100, 70] ) # Shear
Fc1 = [-1250, -600] # Compression 1 direction
Fc2 = [-200, -120] # Compression 2 direction
Ft1 = [1500, 1000] # Tension 1 direction
Ft2 = [50, 30] # Tension 2 direction
Fs = [100, 70] # Shear
# F1 = Ft(1);
# F2 = Ft(1);
# F6 = Fs(1);
for c in range(2):# mattype
factor = 1.25
# right
plot( [Ft1[c], Ft1[c]], [Fc2[c], Ft2[c]])
# left
plot( [Fc1[c], Fc1[c]] , [Fc2[c], Ft2[c]])
# top
plot( [Fc1[c], Ft1[c]] , [Ft2[c], Ft2[c]])
# bottom
plot( [Fc1[c], Ft1[c]] , [Fc2[c], Fc2[c]])
# center horizontal
plot( [Fc1[c], Ft1[c]] , [0, 0])
# center vertical
plot( [0, 0] , [Fc2[c], Ft2[c]])
#xlim([min(Fc1) max(Ft1)]*factor)
#ylim([min(Fc2) max(Ft2)]*factor)
xlabel('$\sigma_1,ksi$')
ylabel('$\sigma_2,ksi$')
title('failure envelope with Max-Stress Criteria')
def material_plots(materials = ['Carbon_cloth_AGP3705H']):
'''
plot composite lamina/laminate properties (moduli, Qbar terms, ply stresses)
as functions of ply angle
'''
# plt.rcParams['figure.figsize'] = (10, 8)
# plt.rcParams['font.size'] = 14
# plt.rcParams['legend.fontsize'] = 14
plyangle = arange(-45, 45.1, 0.1)
h = 1 # lamina thickness
layupname='[0]'
mat = import_matprops(materials)
Ex = mat[materials[0]].E1
Ey = mat[materials[0]].E2
nuxy = mat[materials[0]].nu12
Gxy = mat[materials[0]].G12
# layupname = '[0, 45, 45, 0]'
# Ex= 2890983.38
# Ey= 2844063.06
# nuxy= 0.27
# Gxy= 1129326.25
# h = 0.0600
plt.close('all')
S = Sf(Ex,Ey,nuxy,Gxy)
C = inv(S)
C11 = [(inv(T1(th)) @ C @ T2(th))[0,0] for th in plyangle]
C22 = [(inv(T1(th)) @ C @ T2(th))[1,1] for th in plyangle]
C33 = [(inv(T1(th)) @ C @ T2(th))[2,2] for th in plyangle]
C12 = [(inv(T1(th)) @ C @ T2(th))[0,1] for th in plyangle]
Exbar = zeros(len(plyangle))
Eybar = zeros(len(plyangle))
Gxybar = zeros(len(plyangle))
Q = Qf(Ex,Ey,nuxy,Gxy)
Qbar = zeros((len(plyangle),3,3))
for i,th in enumerate(plyangle):
Qbar[i] = solve(T1(th), Q) @ T2(th)
#Qbar = [solve(T1(th),Q) @ T2(th) for th in plyangle]
Qbar11 = Qbar[:,0,0]
Qbar22 = Qbar[:,1,1]
Qbar66 = Qbar[:,2,2]
Qbar12 = Qbar[:,0,1]
Qbar16 = Qbar[:,0,2]
Qbar26 = Qbar[:,1,2]
Aij = Qbar*h
# laminate Stiffness
# | Exbar Eybar Gxybar |
# A = | vxybar vyxbar etasxbar |
# | etaxsbar etaysbar etasybar |
# laminate Comnpliance
aij = zeros((len(plyangle),3,3))
for i, _Aij in enumerate(Aij):
aij[i] = inv(_Aij)
# material properties for whole laminate (Daniel, pg183)
Exbar = [1/(h*_aij[0,0]) for _aij in aij]
Eybar = [1/(h*_aij[1,1]) for _aij in aij]
Gxybar = [1/(h*_aij[2,2]) for _aij in aij]
# Global Stress
s_xy = array([[100],
[10],
[5]])
# local ply stress
s_12 = np.zeros((3,len(plyangle)))
for i,th in enumerate(plyangle):
#s_12[:,i] = np.transpose(T1(th) @ s_xy)[0] # local stresses
s_12[:,[i]] = T1(th) @ s_xy
# Plotting
figure()#, figsize=(10,8))
plot(plyangle, C11, plyangle, C22, plyangle, C33, plyangle, C12)
legend(['$\overline{C}_{11}$','$\overline{C}_{22}$', '$\overline{C}_{44}$', '$\overline{C}_{66}$'])
title('Transversly Isotropic Stiffness properties of carbon fiber T300_5208')
xlabel("$\Theta$")
ylabel('$\overline{C}_{ii}$, ksi')
grid()
figure()#, figsize=(10,8))
plot(plyangle, Exbar, label = r"Modulus: $E_x$")
plot(plyangle, Eybar, label = r"Modulus: $E_y$")
plot(plyangle, Gxybar, label = r"Modulus: $G_{xy}$")
title("Constitutive Properties in various angles")
xlabel("$\Theta$")
ylabel("modulus, psi")
legend()
grid()
figure()#,figsize=(10,8))
plot(plyangle, s_12[0,:], label = '$\sigma_{11},ksi$' )
plot(plyangle, s_12[1,:], label = '$\sigma_{22},ksi$' )
plot(plyangle, s_12[2,:], label = '$\sigma_{12},ksi$' )
legend(loc='lower left')
xlabel("$\Theta$")
ylabel("Stress, ksi")
grid()
# plot Qbar diagonal terms as a function of ply angle
figure()#,figsize=(10,8))
plot(plyangle,Qbar11, label = "Qbar11")
plot(plyangle,Qbar22, label = "Qbar22")
plot(plyangle,Qbar66, label = "Qbar66")
legend(loc='lower left')
xlabel("$\Theta$")
ylabel('Q')
grid()
# plot Qbar coupling terms as a function of ply angle
figure()#,figsize=(10,8))
plot(plyangle,Qbar12, label = "Qbar12")
plot(plyangle,Qbar16, label = "Qbar16")
plot(plyangle,Qbar26, label = "Qbar26")
legend(loc='lower left')
xlabel("$\Theta$")
ylabel('Q')
grid()
titlename = 'Laminate Properties varying angle for {} {}'.format(materials[0], layupname)
#df = pd.DataFrame({'plyangle':plyangle, 'Exbar':Exbar, 'Eybar':Eybar,'Gxybar':Gxybar})
#print(df)
#df.to_csv(titlename+'.csv')
plt.figure(figsize=(9,6))
plot(plyangle, Exbar, label = r"Modulus: $E_x$")
plot(plyangle, Eybar, label = r"Modulus: $E_y$")
plot(plyangle, Gxybar, label = r"Modulus: $G_{xy}$")
title(titlename)
xlabel("$\Theta$")
ylabel("modulus, psi")
legend(loc='best')
grid()
#plt.savefig(titlename+'.png')
show()
def laminate_gen(lamthk=1.5, symang=[45,0,90], plyratio=2.0, matrixlayers=False, balancedsymmetric=True):
'''
## function created to quickly create laminates based on given parameters
lamthk=1.5 # total thickness of laminate
symang = [45,0,90, 30] # symmetric ply angles
plyratio=2.0 # lamina/matrix thickness ratio
matrixlayers=False # add matrix layers between lamina plies
balancedsymmetric=True # build a balanced, symmetric layup
mat = material type, as in different plies, matrix layer, uni tapes, etc
# ply ratio can be used to vary the ratio of thickness between a matrix ply
and a lamina ply. if the same thickness is desired, plyratio = 1,
if lamina is 2x as thick as matrix, plyratio = 2
'''
if matrixlayers:
nply = (len(symang)*2+1)*2
nm = nply-len(symang)*2
nf = len(symang)*2
tm = lamthk / (plyratio*nf + nm)
tf = tm*plyratio
plyangle = zeros(nply//2)
mat = 2*ones(nply//2) # orthotropic fiber and matrix = 1, isotropic matrix=2,
mat[1:-1:2] = 1 # [2 if x%2 else 1 for x in range(nply//2) ]
plyangle[1:-1:2] = symang[:] # make a copy
thk = tm*ones(nply//2)
thk[1:-1:2] = tf # lamina plies (odd indices, cf. mat above) get the fiber thickness
lamang = list(symang) + list(symang[::-1])
plyangle = list(plyangle) + list(plyangle[::-1])
mat = list(mat) + list(mat[::-1])
thk = list(thk) + list(thk[::-1])
else: # no matrix layers, ignore ratio
if balancedsymmetric:
nply = len(symang)*2
mat = list(3*np.ones(nply))
thk = list(lamthk/nply*np.ones(nply))
lamang = list(symang) + list(symang[::-1])
plyangle = list(symang) + list(symang[::-1])
else:
nply = len(symang)
mat =[1]*nply
thk = list(lamthk/nply*np.ones(nply))
lamang = symang[:]
plyangle = symang[:]
return thk,plyangle,mat,lamang
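# Example use of laminate_gen (values illustrative). With matrixlayers=False
# and balancedsymmetric=True, a [45/0/90]s layup of six equal-thickness plies
# is returned:
#   thk, plyangle, mat, lamang = laminate_gen(lamthk=1.5, symang=[45,0,90])
#   # plyangle -> [45, 0, 90, 90, 0, 45], each ply 0.25 thick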
def make_quasi(n0=4,n45=4):
#n0 = 4
#n45 = 13
#
#ply0 = [0]*n0
#ply45 = [45]*n45
#plyangle = []
#from itertools import zip_longest
#for x,y in zip_longest(ply0,ply45):
# if len(plyangle)<min(len(ply0),len(ply45))*2:
# plyangle.append(x)
# plyangle.append(y)
# else:
# plyangle.append(x)
# plyangle.reverse()
# plyangle.append(y)
#plyangle = [x for x in plyangle if x is not None]
#plyangle
ntot = n45+n0
plyangle = [45]*int(n45)
for p in [0]*int(n0):
plyangle.append(p)
plyangle.reverse()
return plyangle
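# Example: make_quasi(n0=2, n45=2) returns [0, 0, 45, 45] -- the 45s are
# stacked first, the 0s appended, then the list is reversed.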
#@xw.func
def laminate_calcs(NM,ek,q0,plyangle,plymatindex,materials,platedim, zoffset,SF,plots,prints):
'''
code to compute composite properties, applied mechanical and thermal loads
and stress and strain
inputs
NM # force/moments lbs/in
ek # strain, curvature in/in
q0 = pressure
plyangle # angle for each ply
plymatindex # material for each ply
materials # list materials used,
general outline for computing elastic properties of composites
1) Determine engineering properties of unidirectional laminate. E1, E2, nu12, G12
2) Calculate ply stiffnesses Q11, Q22, Q12, Q66 in the principal/local coordinate system
3) Determine Fiber orientation of each ply
4) Calculate the transformed stiffness Qxy in the global coordinate system
5) Determine the through-thicknesses of each ply
6) Determine the laminate stiffness Matrix (ABD)
7) Calculate the laminate compliance matrix by inverting the ABD matrix
8) Calculate the laminate engineering properties
# Stress Strain Relationship for a laminate, with Q=reduced stiffness matrix
|sx | |Qbar11 Qbar12 Qbar16| |ex +z*kx |
|sy |=|Qbar12 Qbar22 Qbar26|=|ey +z*ky |
|sxy| |Qbar16 Qbar26 Qbar66| |exy+z*kxy|
# Herakovich pg 84
Qbar = inv(T1) @ Q @ T2 == solve(T1, Q) @ T2
transformation reminders - see Herakovich for details
sig1 = T1*sigx
sigx = inv(T1)*sig1
eps1 = T2*epsx
epsx = inv(T2)*eps1
sigx = inv(T1)*Q*T2*epsx
Qbar = inv(T1)*Q*T2
Sbar = inv(T2)*inv(Q)*T2
Notes, core transverse direction is G13, ribbon direction is G23
a_width = 50 # plate width (inches or meters)
b_length = 50 # laminate length, inches or meters
'''
#==========================================================================
# Initialize python settings
#==========================================================================
#get_ipython().magic('matplotlib')
plt.close('all')
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams['font.size'] = 13
#plt.rcParams['legend.fontsize'] = 14
#==========================================================================
# Define composite properties
#==========================================================================
assert(len(plyangle)==len(plymatindex))
a_width, b_length = platedim
# either apply strains or loads , lb/in
Nx_, Ny_, Nxy_, Mx_, My_, Mxy_ = NM
NMbarapp = array([[Nx_],[Ny_],[Nxy_],[Mx_],[My_],[Mxy_]])
ex_, ey_, exy_, kx_, ky_, kxy_ = ek
epsilonbarapp = array([[ex_],[ey_],[exy_],[kx_],[ky_],[kxy_]])
Ti = 0 # initial temperature (C)
Tf = 0 # final temperature (C)
#SF = 1.0 # safety factor
#==========================================================================
# Import Material Properties
#==========================================================================
mat = import_matprops(materials)
#mat = import_matprops(['E-Glass Epoxy cloth','rohacell2lb']) # Herakovich
alphaf = lambda mat: array([[mat.alpha1], [mat.alpha2], [0]])
''' to get ply material info, use as follows
alpha = alphaf(mat[materials[plymatindex[i]]])
mat[materials[1]].E2
'''
laminatethk = array([mat[materials[i]].plythk for i in plymatindex ])
nply = len(laminatethk) # number of plies
H = np.sum(laminatethk) # plate thickness
# area = a_width*H
z = zeros(nply+1)
zmid = zeros(nply)
z[0] = -H/2
for i in range(nply):
z[i+1] = z[i] + laminatethk[i]
zmid[i] = z[i] + laminatethk[i]/2
#==========================================================================
# ABD Matrix Compute
#==========================================================================
# Reduced stiffness matrix for a plane stress ply in principal coordinates
# calculating Q from the compliance matrix may cause cancellation errors
A = zeros((3,3)); B = zeros((3,3)); D = zeros((3,3))
for i in range(nply): # = nply
Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 )
Qbar = solve(T1(plyangle[i]), Q) @ T2(plyangle[i]) # inv(T1(plyangle[i])) @ Q @ T2(plyangle[i])
A += Qbar*(z[i+1]-z[i])
# coupling stiffness
B += (1/2)*Qbar*(z[i+1]**2-z[i]**2)
# bending or flexural laminate stiffness relating moments to curvatures
D += (1/3)*Qbar*(z[i+1]**3-z[i]**3)
#Cbar6 = T61 @ C6 @ np.transpose(T61)
# laminate stiffness matrix
ABD = zeros((6,6))
ABD[0:3,0:3] = A
ABD[0:3,3:6] = B + zoffset*A
ABD[3:6,0:3] = B + zoffset*A
ABD[3:6,3:6] = D + 2*zoffset*B + zoffset**2*A
# laminate compliance
abcd = inv(ABD)
a = abcd[0:3,0:3]
#==========================================================================
# Laminate Properties
#==========================================================================
# effective laminate shear coupling coefficients
etasxbar = a[0,2]/a[2,2]
etasybar = a[1,2]/a[2,2]
etaxsbar = a[2,0]/a[0,0]
etaysbar = a[2,1]/a[1,1]
# laminate engineer properties
Exbar = 1 / (H*a[0,0])
Eybar = 1 / (H*a[1,1])
Gxybar = 1 / (H*a[2,2])
nuxybar = -a[0,1]/a[0,0]
nuyxbar = -a[0,1]/a[1,1]
# TODO: validate results, does not appear to be correct
# strain centers, pg 72, NASA-Basic mechanics of laminated composites
# added divide by zero epsilon
z_eps0_x = -B[0,0] / (D[0,0] + 1e-16)
z_eps0_y = -B[0,1] / (D[0,1] + 1e-16)
z_eps0_xy = -B[0,2] / (D[0,2] + 1e-16)
z_sc = -B[2,2] / (D[2,2] +1e-16) # shear center
# --------------------- Double Check ---------------------
# # Laminate compliance matrix
# LamComp = array([ [1/Exbar, -nuyxbar/Eybar, etasxbar/Gxybar],
# [-nuxybar/Exbar, 1/Eybar , etasybar/Gxybar],
# [etaxsbar/Exbar, etaysbar/Eybar, 1/Gxybar]] )
# # Daniel pg 183
# # combines applied loads and applied strains
# strain_laminate = LamComp @ Nxyzapplied[:3]/H + strainxyzapplied[:3]
# Nxyz = A @ strain_laminate
# stress_laminate = Nxyz/H
# --------------------------------------------------------
#==========================================================================
# Pressure Load
#==========================================================================
#==========================================================================
# pressure displacement and moments
#==========================================================================
D11,D12,D22,D66 = D[0,0], D[0,1], D[1,1], D[2,2]
B11 = B[0,0]
A11, A12 = A[0,0], A[0,1]
# reddy pg 247 Navier displacement solution for a simply supported plate
s = b_length/a_width
x = a_width/2
y = b_length/2
# 5.2.8, reddy, or hyer 13.123
terms = 5
w0 = 0
for m in range(1,terms,2):
for n in range(1,terms,2):
dmn = pi**4/b_length**4 * (D11*m**4*s**4 + 2*(D12 + 2*D66)*m**2*n**2*s**2 + D22*n**4)
alpha = m*pi/a_width
beta = n*pi/b_length
# for uniformly distributed loads, m,n = 1,3,5,...
Qmn = 16*q0/(pi**2*m*n)
Wmn = Qmn/dmn
w0 += Wmn * sin(alpha*x) * sin(beta*y)
w0_simplesupport = w0
# 5.2.12a, reddy
# mid span moments
Mxq=Myq=Mxyq=0
for m in range(1,terms,2):
for n in range(1,terms,2):
dmn = pi**4/b_length**4 * (D11*m**4*s**4 + 2*(D12 + 2*D66)*m**2*n**2*s**2 + D22*n**4)
alpha = m*pi/a_width
beta = n*pi/b_length
# for uniformly distributed loads, m,n = 1,3,5,...
Qmn = 16*q0/(pi**2*m*n)
Wmn = Qmn/dmn
Mxq += (D11*alpha**2 + D12*beta**2 ) * Wmn * sin(m*pi*x/a_width) * sin(n*pi*y/b_length)
Myq += (D12*alpha**2 + D22*beta**2 ) * Wmn * sin(m*pi*x/a_width) * sin(n*pi*y/b_length)
Mxyq += alpha*beta*D66 * Wmn * cos(m*pi*x/a_width) * cos(n*pi*y/b_length)
Mxyq = -2*Mxyq
NMq = [[0],[0],[0],[Mxq],[Myq],[Mxyq]]
# hyer, x-pin-pin, y-free-free plate reaction forces, pg 619
# Forces and Moments across the width of the plate
A11R = A11*(1-B11**2/(A11*D11))
D11R = D11*(1-B11**2/(A11*D11))
Nxq0 = lambda x: B11/D11 * q0 * a_width**2 /12
Nyq0 = lambda x: B11 * A12*q0 * a_width**2 / (D11*A11R*12) * (6*(x/a_width)**2-1/2)
Nxyq0 = lambda x: 0
Mxq0 = lambda x: q0 * a_width**2/8 * (1-4*(x/a_width)**2)
Myq0 = lambda x: D12 * q0 * a_width**2 / (D11R*8) * ((1-2*B11**2/(3*A11*D11))-(4*(x/a_width)**2))
Mxyq0 = lambda x: 0
# clamped plate 5.4.11, reddy
#w0_clamped = ( 49 * q0*a_width**4 * (x/a_width - (x/a_width)**2 )**2 * (y/b_length - (y/b_length)**2)**2) / (8 * (7*D11+4*(D12 + 2*D66)*s**2 + 7*D22*s**4) )
# reddy, 5.4.12
w0_clamped = 0.00342 * (q0*a_width**4) / (D11+0.5714*(D12+2*D66)*s**2+D22*s**4)
# reddy, 5.4.15
#w0_clamped = 0.00348 * (q0*a_width**4) / (D11*b_length**4+0.6047*(D12+2*D66)*s**2+D22*s**4)
# reddy 5.4.15, for isotropic D11=D
w0_clamped_isotropic = 0.00134*q0*a_width**4/D11
#==========================================================================
# Applied Loads and pressure loads
#==========================================================================
NMbarapptotal = NMbarapp + NMq + ABD @ epsilonbarapp
#==========================================================================
# Thermal Loads
#==========================================================================
'''
if the material is isotropic and unconstrained, then no thermal stresses
will be experienced. If there are constraints, then the material will experience
thermally induced stresses. As with orthotropic materials, various directions will have
different stresses, and when stacked in various orientations, stresses can be
unintuitive and complicated. Global Thermal strains are subtracted from applied strains
# 1) determine the free unrestrained thermal strains in each layer, alphabar
'''
dT = Tf-Ti
Nhatth= zeros((3,1)) # unit thermal force in global CS
Mhatth = zeros((3,1)) # unit thermal moment in global CS
alphabar = zeros((3,nply)) # global ply CTE
for i in range(nply): # = nply
Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 )
alpha = alphaf(mat[materials[plymatindex[i]]])
Qbar = inv(T1(plyangle[i])) @ Q @ T2(plyangle[i])
alphabar[:,[i]] = solve(T2(plyangle[i]), alpha)
#alphabar[:,[i]] = inv(T2(plyangle[i])) @ alpha # Convert to global CS
Nhatth += Qbar @ (alphabar[:,[i]])*(z[i+1] - z[i]) # Hyer method for calculating thermal unit loads
Mhatth += 0.5*Qbar@(alphabar[:,[i]])*(z[i+1]**2-z[i]**2)
NMhatth = np.vstack((Nhatth,Mhatth))
NMbarth = NMhatth*dT # resultant thermal loads
# Laminate CTE
epsilonhatth = abcd@NMhatth # laminate CTE
# applied loads and thermal loads
epsilonbarapp = abcd @ NMbarapptotal
epsilonbarth = abcd @ NMbarth # resultant thermal strains
epsilonbartotal = epsilonbarapp + epsilonbarth
# Composite response from applied mechanical loads and strains. Average
# properties only. Used to compare results from tensile test.
#epsilon_laminate = abcd@NMbarapptotal
#sigma_laminate = ABD@epsilon_laminate/H
epsilon_laminate = epsilonbartotal[:]
sigma_laminate = ABD@epsilonbartotal/H
alpha_laminate = a@Nhatth
# determine thermal load and applied loads or strains Hyer pg 435,452
Nx = NMbarapptotal[0,0]*a_width # units kiloNewtons, total load as would be applied in a tensile test
Ny = NMbarapptotal[1,0]*b_length # units kN
#==========================================================================
# Thermal and mechanical local and global stresses at the ply interface
#==========================================================================
# Declare variables for plotting
epsilon_app = zeros((3,2*nply))
sigma_app = zeros((3,2*nply))
epsilonbar_app = zeros((3,2*nply))
sigmabar_app = zeros((3,2*nply))
epsilon_th = zeros((3,2*nply))
sigma_th = zeros((3,2*nply))
epsilonbar_th = zeros((3,2*nply))
sigmabar_th = zeros((3,2*nply))
epsilon = zeros((3,2*nply))
epsilonbar = zeros((3,2*nply))
sigma = zeros((3,2*nply))
sigmabar = zeros((3,2*nply))
for i,k in enumerate(range(0,2*nply,2)):
# stress is calculated at top and bottom of each ply
Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 )
Qbar = inv(T1(plyangle[i])) @ Q @ T2(plyangle[i])
### transverse shear, herakovich pg 254
#Q44 = mat[materials[plymatindex[i]]].G23
#Q55 = mat[materials[plymatindex[i]]].G13
#Qbar44 = Q44*cos(plyangle[i])**2+Q55*sin(plyangle[i])**2
#Qbar55 = Q55*cos(plyangle[i])**2 + Q44*sin(plyangle[i])**2
#Qbar45 = (Q55-Q44)*cos(plyangle[i])*sin(plyangle[i])
#epsilontransverse = array([[gammayz],[gammaxz]])
#sigmatransverse = array([[Qbar44, Qbar45],[Qbar45, Qbar55]]) @ epsilontransverse
# Global stresses and strains, applied load only
epsbarapp1 = epsilonbarapp[0:3] + z[i]*epsilonbarapp[3:7]
epsbarapp2 = epsilonbarapp[0:3] + z[i+1]*epsilonbarapp[3:7]
sigbarapp1 = Qbar @ epsbarapp1
sigbarapp2 = Qbar @ epsbarapp2
# Local stresses and strains, applied load only
epsapp1 = T2(plyangle[i]) @ epsbarapp1
epsapp2 = T2(plyangle[i]) @ epsbarapp2
sigapp1 = Q @ epsapp1
sigapp2 = Q @ epsapp2
# Interface Stresses and Strains
epsilon_app[:,k:k+2] = np.column_stack((epsapp1,epsapp2))
epsilonbar_app[:,k:k+2] = np.column_stack((epsbarapp1,epsbarapp2))
sigma_app[:,k:k+2] = np.column_stack((sigapp1,sigapp2))
sigmabar_app[:,k:k+2] = np.column_stack((sigbarapp1,sigbarapp2))
# Global stress and strains, thermal loading only
epsbarth1 = epsilonbarth[0:3] + z[i]*epsilonbarth[3:7] - dT*alphabar[:,[i]]
epsbarth2 = epsilonbarth[0:3] + z[i+1]*epsilonbarth[3:7] - dT*alphabar[:,[i]]
sigbarth1 = Qbar @ epsbarth1
sigbarth2 = Qbar @ epsbarth2
# Local stress and strains, thermal loading only
epsth1 = T2(plyangle[i]) @ epsbarth1
epsth2 = T2(plyangle[i]) @ epsbarth2
sigth1 = Q @ epsth1
sigth2 = Q @ epsth2
# Interface Stresses and Strains
epsilon_th[:,k:k+2] = np.column_stack((epsth1,epsth2))
epsilonbar_th[:,k:k+2] = np.column_stack((epsbarth1+dT*alphabar[:,[i]],epsbarth2+dT*alphabar[:,[i]])) # remove the local thermal loads for plotting. only use local thermal strains for calculating stress
sigma_th[:,k:k+2] = np.column_stack((sigth1,sigth2))
sigmabar_th[:,k:k+2] = np.column_stack((sigbarth1,sigbarth2))
# TOTAL global stresses and strains, applied and thermal
epsbar1 = epsbarapp1 + epsbarth1
epsbar2 = epsbarapp2 + epsbarth2
sigbar1 = Qbar @ epsbar1
sigbar2 = Qbar @ epsbar2
# TOTAL local stresses and strains , applied and thermal
eps1 = T2(plyangle[i]) @ epsbar1
eps2 = T2(plyangle[i]) @ epsbar2
sig1 = Q @ eps1
sig2 = Q @ eps2
# Interface Stresses and Strains
epsilon[:,k:k+2] = np.column_stack((eps1,eps2))
epsilonbar[:,k:k+2] = np.column_stack((epsbar1+dT*alphabar[:,[i]],epsbar2+dT*alphabar[:,[i]])) # remove the local thermal loads for plotting. only use local thermal strains for calculating stress
sigma[:,k:k+2] = np.column_stack((sig1,sig2))
sigmabar[:,k:k+2] = np.column_stack((sigbar1,sigbar2))
#==========================================================================
# Strength Failure Calculations
#==========================================================================
# Strength Ratio
STRENGTHRATIO_MAXSTRESS = zeros((3,2*nply))
# Failure Index
FAILUREINDEX_MAXSTRESS = zeros((3,2*nply))
STRENGTHRATIO_TSAIWU = zeros((nply))
for i,k in enumerate(range(0,2*nply,2)):
# stress
s1 = sigma[0,k]
s2 = sigma[1,k]
s12 = np.abs(sigma[2,k])
# strength
F1 = mat[materials[plymatindex[i]]].F1t if s1 > 0 else mat[materials[plymatindex[i]]].F1c
F2 = mat[materials[plymatindex[i]]].F2t if s2 > 0 else mat[materials[plymatindex[i]]].F2c
F12 = mat[materials[plymatindex[i]]].F12
# Max Stress failure index: fail if > 1, FI = 1/SR
FAILUREINDEX_MAXSTRESS[0,k:k+2] = s1 / F1
FAILUREINDEX_MAXSTRESS[1,k:k+2] = s2 / F2
FAILUREINDEX_MAXSTRESS[2,k:k+2] = s12 / F12
# Tsai-Wu, failure occurs when > 1
F1t = mat[materials[plymatindex[i]]].F1t
F1c = mat[materials[plymatindex[i]]].F1c
F2t = mat[materials[plymatindex[i]]].F2t
F2c = mat[materials[plymatindex[i]]].F2c
F12 = mat[materials[plymatindex[i]]].F12
# inhomogeneous Tsai-Wu criterion # from Daniel
# http://www2.mae.ufl.edu/haftka/composites/mcdaniel-nonhomogenous.pdf
f1 = 1/F1t + 1/F1c
f2 = 1/F2t + 1/F2c
f11 = -1/(F1t*F1c)
f22 = -1/(F2t*F2c)
f66 = 1/F12**2
f12 = -0.5*sqrt(f11*f22)
#TW = f1*s1 + f2*s2 + f11*s1**2 + f22*s2**2 + f66*s12**2 + 2*f12*s1*s2
# polynomial to solve. Added a machine epsilon to avoid divide by zero errors
lam1 = f11*s1**2 + f22*s2**2 + f66*s12**2 + 2*f12*s1*s2 + 1e-16
lam2 = f1*s1 + f2*s2 + 1e-16
lam3 = -1
# smallest positive root
roots = array([(-lam2+sqrt(lam2**2-4*lam1*lam3)) / (2*lam1) ,
(-lam2-sqrt(lam2**2-4*lam1*lam3)) / (2*lam1)] )
STRENGTHRATIO_TSAIWU[i] = roots[roots>=0].min() # strength ratio
# f1 = 1/F1t - 1/F1c
# f2 = 1/F2t - 1/F2c
# f11 = 1/(F1t*F1c)
# f22 = 1/(F2t*F2c)
# f66 = 1/F12**2
# STRENGTHRATIO_TSAIWU[i] = 2 / (f1*s2 + f2*s2 + sqrt((f1*s1+f2*s2)**2+4*(f11*s1**2+f22*s2**2+f66*s12**2)))
### Apply safety factors
FAILUREINDEX_MAXSTRESS = FAILUREINDEX_MAXSTRESS * SF
STRENGTHRATIO_TSAIWU = STRENGTHRATIO_TSAIWU / SF
###
MARGINSAFETY_TSAIWU = STRENGTHRATIO_TSAIWU-1 # margin of safety
# strength ratio for max stress, if < 1, then fail, SR = 1/FI
STRENGTHRATIO_MAXSTRESS = 1/(FAILUREINDEX_MAXSTRESS+1e-16)
# margin of safety based on max stress criteria
MARGINSAFETY_MAXSTRESS = STRENGTHRATIO_MAXSTRESS-1
# minimum margin of safety for Max stress failure
MARGINSAFETY_MAXSTRESS_min = MARGINSAFETY_MAXSTRESS.min().min()
FAILUREINDEX_MAXSTRESS_max = FAILUREINDEX_MAXSTRESS.max().max()
# minimum margin of safety of both Tsai-Wu and Max Stress
#MARGINSAFETY_MAXSTRESS_min = np.minimum(MARGINSAFETY_MAXSTRESS.min().min(), MARGINSAFETY_TSAIWU.min() )
# find critial values for all failure criteria
#MARGINSAFETY_MAXSTRESS = MARGINSAFETY_MAXSTRESS[~np.isinf(MARGINSAFETY_MAXSTRESS)] # remove inf
#MARGINSAFETY_TSAIWU = MARGINSAFETY_TSAIWU[~np.isinf(MARGINSAFETY_TSAIWU)] # remove inf
#==========================================================================
# Buckling Failure Calculations
#==========================================================================
''' Buckling of Clamped plates under shear load, reddy, 5.6.17'''
k11 = 537.181*D11/a_width**4 + 324.829*(D12+2*D66)/(a_width**2*b_length**2) + 537.181*D22/b_length**4
k12 = 23.107/(a_width*b_length)
k22 = 3791.532*D11/a_width**4 + 4227.255*(D12+2*D66)/(a_width**2*b_length**2) + 3791.532*D22/b_length**4
Nxycrit0 = 1/k12*np.sqrt(k11*k22)
FI_clamped_shear_buckling = (abs(Nxy_)*SF) / Nxycrit0 # failure if > 1
MS_clamped_shear_buckling = 1/(FI_clamped_shear_buckling+1e-16)-1
'''Kassapoglous pg 126,137
simply supported plate buckling, assumes Nx>0 is compression
Nxcrit0 is the axial load that causes buckling
Nxycrit0 is the shear load that cause buckling
Nxcrit is the axial load part of a combined load that causes buckling
Nxycrit is the shear load part of a combined load that causes buckling
'''
# no buckling issues if Nx is positive
# buckling calculations assume Nx compression is positive.
Nx__ = abs(Nx_) if Nx_ < 0 else np.float64(0)
Nxy__ = np.float64(0) if Nxy_ == 0 else abs(Nxy_) # assume shear in 1 direction although both directions are ok
# Nxy=0
Nxcrit0 = pi**2/a_width**2 * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 + D22*a_width**4/b_length**4)
# Nx=0
Nxycrit0 = 9*pi**4*b_length / (32*a_width**3) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 + D22*a_width**4/b_length**4)
FI_Nxy0_buckling, FI_Nx0_buckling, FI_Nx_buckling, FI_Nxy_buckling = 0,0,0,0
if Nx__ == 0 or Nxy__ == 0:
FI_Nxy0_buckling = (Nxy__*SF)/Nxycrit0
FI_Nx0_buckling = (Nx__*SF)/Nxcrit0
else:
# interaction term
k = Nxy__ / Nx__
Nxcrit = min( abs((pi**2/a_width**2) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 +D22*a_width**4/b_length**4 ) / (2-8192*a_width**2*k**2/(81*b_length**2*pi**4)) * (5 + sqrt(9 + 65536*a_width**2*k**2/(81*pi**4*b_length**2)))) ,
abs((pi**2/a_width**2) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 +D22*a_width**4/b_length**4 ) / (2-8192*a_width**2*k**2/(81*b_length**2*pi**4)) * (5 - sqrt(9 + 65536*a_width**2*k**2/(81*pi**4*b_length**2)))) )
Nxycrit = Nxycrit0*sqrt(1-Nxcrit/Nxcrit0)
# interactive calc
FI_Nx_buckling = (Nx__ *SF)/Nxcrit
FI_Nxy_buckling = (Nxy__*SF)/Nxycrit
FI_combinedload_simplesupport_buckle = max([FI_Nxy0_buckling,
FI_Nx0_buckling,
FI_Nx_buckling,
FI_Nxy_buckling] )
MS_min_buckling = 1/(FI_combinedload_simplesupport_buckle+1e-16)-1
#==========================================================================
# Facesheet Wrinkling
#==========================================================================
#==========================================================================
# principal laminate stresses
#==========================================================================
sigma_principal_laminate = np.linalg.eig(array([[sigma_laminate[0,0],sigma_laminate[2,0],0],
[sigma_laminate[2,0],sigma_laminate[1,0],0],
[0,0,0]]))[0]
tauxy_p = sigma_laminate[2,0]
sigmax_p = sigma_laminate[0,0]
sigmay_p = sigma_laminate[1,0]
thetap = 0.5 * np.arctan( 2*tauxy_p / ((sigmax_p-sigmay_p+1e-16))) * 180/np.pi
#==========================================================================
# Printing Results
#==========================================================================
if prints:
print('--------------- laminate1 Stress analysis of fibers----------')
print('(z-) plyangles (z+)'); print(plyangle)
print('(z-) plymatindex (z+)'); print(plymatindex)
print('ply layers') ; print(z)
print('laminate thickness, H = {:.4f}'.format(H))
#print('x- zero strain laminate center, z_eps0_x = {:.4f}'.format(z_eps0_x))
#print('y- zero strain laminate center, z_eps0_y = {:.4f}'.format(z_eps0_y))
#print('xy-zero strain laminate center, z_eps0_xy = {:.4f}'.format(z_eps0_xy))
#print('shear center laminate center, z_sc = {:.4f}'.format(z_sc))
print('Applied Loads'); print(NM)
print('ABD=');print(ABD)
print('Ex= {:.2f}'.format(Exbar) )
print('Ey= {:.2f}'.format(Eybar) )
print('nuxy= {:.2f}'.format(nuxybar) )
print('Gxy= {:.2f}'.format(Gxybar) )
print('epsilon_laminate') ; print(epsilon_laminate)
print('sigma_laminate') ; print(sigma_laminate)
print('sigma_principal_laminate') ; print(sigma_principal_laminate)
print('principal_angle = {:.2f} deg'.format(thetap))
print('NMbarapp') ; print(NMbarapp)
print('sigma') ; print(sigma)
print('\nMax Stress Percent Margin of Safety, failure < 0, minimum = {:.4f}'.format( MARGINSAFETY_MAXSTRESS_min ) )
print(MARGINSAFETY_MAXSTRESS)
print('\nTsai-Wu Percent Margin of Safety, failure < 0, minimum = {:.4f}'.format(MARGINSAFETY_TSAIWU.min()))
print(MARGINSAFETY_TSAIWU)
print('\nmaximum failure index = {:.4f}'.format( FAILUREINDEX_MAXSTRESS_max ))
print(FAILUREINDEX_MAXSTRESS)
print('\nBuckling MS for Nxy only for clamped edges = {:.4f}\n'.format(MS_clamped_shear_buckling))
# print('---- Individual Buckling Failure Index (fail>1) combined loads and simple support -----')
# print('FI_Nxy0 = {:.2f}'.format(FI_Nxy0_buckling) )
# print('FI_Nx0 = {:.2f}'.format(FI_Nx0_buckling) )
# print('---- Interactive Buckling Failure Index (fail>1) combined loads and simple support -----')
# print('FI_Nx = {:.2f}'.format(FI_Nx_buckling) )
# print('FI_Nxy = {:.2f}'.format(FI_Nxy_buckling) )
# print('---- Buckling Failure Index (fail>1) combined loads and simple support -----')
# print(FI_combinedload_simplesupport_buckle)
print('buckling combined loads and simple support MS = {:.4f}\n'.format((MS_min_buckling)))
print('Mx_midspan = {:.2f}'.format(Mxq) )
print('My_midspan = {:.2f}'.format(Myq) )
print('Mxy_midspan = {:.2f}'.format(Mxyq) )
print('w0_simplesupport = {:.6f}'.format(w0_simplesupport) )
print('w0_clamped = {:.6f}'.format(w0_clamped) )
print('w0_clamped_isotropic= {:.6f}'.format(w0_clamped_isotropic) )
#display(sp.Matrix(sigmabar))
#==========================================================================
# Plotting
#==========================================================================
if plots:
windowwidth = 800
windowheight = 450
zplot = zeros(2*nply)
for i,k in enumerate(range(0,2*nply,2)): # = nply
zplot[k:k+2] = z[i:i+2]
#legendlab = ['total','thermal','applied','laminate']
# global stresses and strains
mylw = 1.5 #linewidth
# Global Stresses and Strains
f1, ((ax1,ax2,ax3), (ax4,ax5,ax6)) = plt.subplots(2,3, sharex='row', sharey=True)
f1.canvas.set_window_title('Global Stress and Strain of %s laminate' % (plyangle))
stresslabel = ['$\sigma_x$','$\sigma_y$','$\\tau_{xy}$']
strainlabel = ['$\epsilon_x$','$\epsilon_y$','$\gamma_{xy}$']
for i,ax in enumerate([ax1,ax2,ax3]):
## the top axes
ax.set_ylabel('thickness,z')
ax.set_xlabel(strainlabel[i])
ax.set_title(' Ply Strain '+strainlabel[i])
ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4)) # scilimits=(-2,2))
ax.plot(epsilonbar[i,:], zplot, color='blue', lw=mylw, label='total')
ax.plot(epsilonbar_th[i,:], zplot, color='red', lw=mylw, alpha=0.75, linestyle='--', label='thermal')
ax.plot(epsilonbar_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied')
ax.plot([epsilon_laminate[i], epsilon_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate')
ax.grid(True)
#ax.set_xticks(linspace( min(ax.get_xticks()) , max(ax.get_xticks()) ,6))
for i,ax in enumerate([ax4,ax5,ax6]):
ax.set_ylabel('thickness,z')
ax.set_xlabel(stresslabel[i])
ax.set_title(' Ply Stress '+stresslabel[i])
ax.ticklabel_format(axis='x', style='sci', scilimits=(-3,3)) # scilimits=(-2,2))
ax.plot(sigmabar[i,:], zplot, color='blue', lw=mylw, label='total')
ax.plot(sigmabar_th[i,:], zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal')
ax.plot(sigmabar_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied')
ax.plot([sigma_laminate[i], sigma_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate')
ax.grid(True)
leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3)
tight_layout()
try:
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(25,50,windowwidth,windowheight)
except:
pass
f1.show()
#plt.savefig('global-stresses-strains.png')
### Local Stresses and Strains
f2, ((ax1,ax2,ax3), (ax4,ax5,ax6)) = plt.subplots(2,3, sharex='row', sharey=True)
f2.canvas.set_window_title('Local Stress and Strain of %s laminate' % (plyangle))
stresslabel = ['$\sigma_1$','$\sigma_2$','$\\tau_{12}$']
strainlabel = ['$\epsilon_1$','$\epsilon_2$','$\gamma_{12}$']
strengthplot = [ [ [F1t,F1t],[zplot.min(), zplot.max()], [F1c, F1c],[zplot.min(), zplot.max()] ] ,
[ [F2t,F2t],[zplot.min(), zplot.max()], [F2c, F2c],[zplot.min(), zplot.max()] ] ,
[ [F12,F12],[zplot.min(), zplot.max()], [-F12,-F12],[zplot.min(), zplot.max()] ] ]
for i,ax in enumerate([ax1,ax2,ax3]):
## the top axes
ax.set_ylabel('thickness,z')
ax.set_xlabel(strainlabel[i])
ax.set_title(' Ply Strain '+strainlabel[i])
ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4)) # scilimits=(-2,2))
ax.plot(epsilon[i,:], zplot, color='blue', lw=mylw, label='total')
ax.plot(epsilon_th[i,:], zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal')
ax.plot(epsilon_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied')
ax.plot([epsilon_laminate[i], epsilon_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate')
ax.grid(True)
for i,ax in enumerate([ax4,ax5,ax6]):
ax.set_ylabel('thickness,z')
ax.set_xlabel(stresslabel[i])
ax.set_title(' Ply Stress '+stresslabel[i])
ax.ticklabel_format(axis='x', style='sci', scilimits=(-3,3)) # scilimits=(-2,2))
ax.plot(sigma[i,:], zplot, color='blue', lw=mylw, label='total')
ax.plot(sigma_th[i,:], zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal')
ax.plot(sigma_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied')
ax.plot([sigma_laminate[i], sigma_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate')
### plots strengths
#ax.plot(strengthplot[i][0],strengthplot[i][1], color='yellow', lw=mylw)
ax.grid(True)
leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3)
tight_layout()
try:
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(windowwidth+50,50,windowwidth,windowheight)
except:
pass
f2.show()
#plt.savefig('local-stresses-strains.png')
### Failure
f3, ((ax1,ax2,ax3)) = plt.subplots(1,3, sharex=True, sharey=True)
f3.canvas.set_window_title('Failure Index(failure if > 1), %s laminate' % (plyangle))
stresslabel = ['$\sigma_1/F_1$','$\sigma_2/F_2$','$\\tau_{12}/F_{12}$']
for i,ax in enumerate([ax1,ax2,ax3]):
## the top axes
ax.set_ylabel('thickness,z')
ax.set_xlabel(stresslabel[i])
#ax.set_title(' Ply Strain at $\epsilon=%f$' % (epsxapp*100))
ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4)) # scilimits=(-2,2))
ax.plot(FAILUREINDEX_MAXSTRESS[i,:], zplot, color='blue', lw=mylw, label='total')
ax.grid(True)
ax.set_title('Failure Index, fail if > 1')
#leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3)
tight_layout()
try:
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(25,windowheight+100,windowwidth,windowheight)
except:
pass
f3.show()
#plt.savefig('failure-index.png')
### warpage
res = 100
Xplt,Yplt = np.meshgrid(np.linspace(-a_width/2,a_width/2,res), np.linspace(-b_length/2,b_length/2,res))
epsx = epsilon_laminate[0,0]
epsy = epsilon_laminate[1,0]
epsxy = epsilon_laminate[2,0]
kapx = epsilon_laminate[3,0]
kapy = epsilon_laminate[4,0]
kapxy = epsilon_laminate[5,0]
### displacement
w = -0.5*(kapx*Xplt**2 + kapy*Yplt**2 + kapxy*Xplt*Yplt)
u = epsx*Xplt # pg 451 hyer
fig = plt.figure('plate-warpage')
ax = fig.gca(projection='3d')
ax.plot_surface(Xplt, Yplt, w+zmid[0], cmap=mpl.cm.jet, alpha=0.3)
###ax.auto_scale_xyz([-(a_width/2)*1.1, (a_width/2)*1.1], [(b_length/2)*1.1, (b_length/2)*1.1], [-1e10, 1e10])
ax.set_xlabel('plate width,y-direction,in')
ax.set_ylabel('plate length,x-direction, in')
ax.set_zlabel('warpage,in')
#ax.set_zlim(-0.01, 0.04)
#mngr = plt.get_current_fig_manager() ; mngr.window.setGeometry(450,550,600, 450)
try:
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(windowwidth+50,windowheight+100,windowwidth,windowheight)
except:
pass
plt.show()
#plt.savefig('plate-warpage')
return MARGINSAFETY_MAXSTRESS_min, FAILUREINDEX_MAXSTRESS_max
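# Quick numerical sanity check of the Qbar identity quoted in the
# laminate_calcs docstring: inv(T1) @ Q @ T2 == solve(T1, Q) @ T2.
# The ply constants below are placeholders for a generic carbon/epoxy lamina,
# and the helper name is illustrative only.
def _qbar_transform_check(th=30.0):
    Q = Qf(20.0e6, 1.3e6, 0.3, 0.9e6)  # E1, E2, nu12, G12 (psi)
    Qbar_inv = inv(T1(th)) @ Q @ T2(th)
    Qbar_solve = solve(T1(th), Q) @ T2(th)
    assert np.allclose(Qbar_inv, Qbar_solve)
    return Qbar_inv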
def plate():
'''
composite plate mechanics
TODO - results need to be vetted
'''
#==========================================================================
# Initialize
#==========================================================================
get_ipython().magic('matplotlib')
plt.close('all')
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams['font.size'] = 13
#plt.rcParams['legend.fontsize'] = 14
#==========================================================================
# Import Material Properties
#==========================================================================
plythk = 0.0025
plyangle = array([0,90,-45,45,0]) * np.pi/180 # angle for each ply
nply = len(plyangle) # number of plies
laminatethk = np.zeros(nply) + plythk
H = sum(laminatethk) # plate thickness
# Create z dimensions of laminate
z_ = np.linspace(-H/2, H/2, nply+1)
a = 20 # plate width;
b = 10 # plate height
q0_ = 5.7 # plate load;
# Transversly isotropic material properties
E1 = 150e9
E2 = 12.1e9
nu12 = 0.248
G12 = 4.4e9
nu23 = 0.458
G23 = E2 / (2*(1+nu23))
# Failure Strengths
F1t = 1500e6
F1c = -1250e6
F2t = 50e6
F2c = -200e6
F12t = 100e6
F12c = -100e6
Strength = np.array([[F1t, F1c],
[F2t, F2c],
[F12t, F12c]])
th = sp.symbols('th')
# Stiffnes matrix in material coordinates
Cij6 = inv(Sij6)  # Sij6: 6x6 compliance matrix in material coordinates (assumed defined above)
# reduced stiffness in structural
Cij = sp.Matrix([[Cij6[0,0], Cij6[0,1], 0],
[Cij6[0,1], Cij6[1,1], 0],
[0, 0, Cij6[5,5] ]] )
Tij = sp.Matrix([[cos(th)**2, sin(th)**2, 2*sin(th)*cos(th)],
[sin(th)**2, cos(th)**2, -2*sin(th)*cos(th)],
[-cos(th)*sin(th), sin(th)*cos(th), (cos(th)**2-sin(th)**2)]])
## Cylindrical Bending of a laminated plate
# displacement in w (z direction)
from sympy.abc import x
f = Function('f')
eq = dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x), hint = '1st_homogeneous_coeff_best', simplify=False)
pprint(eq)
#==============================================================================
th,x,y,z,q0,C1,C2,C3,C4,C5,C6,C7,A11,B11,D11,A16,B16 = symbols('th x y z q0 C1 C2 C3 C4 C5 C6 C7 A11 B11 D11 A16 B16')
wfun = Function('wfun')
ufun = Function('ufun')
## EQ 4.4.1a
eq1 = A11*ufun(x).diff(x,2) - B11*wfun(x).diff(x,3)
#eq1 = A11*diff(ufun,x,2) - B11*diff(wfun,x,3); # C5 C1
## EQ 4.4.1b
#eq2 = A16*diff(ufun,x,2) - B16*diff(wfun,x,3); # C5 C1
eq2 = A16*ufun(x).diff(x,2) - B16*wfun(x).diff(x,3)
## EQ 4.4.1c
#eq3 = B11*diff(ufun,x,3) - D11*diff(wfun,x,4) + q0;
eq3 = B11*ufun(x).diff(x,3) - D11*wfun(x).diff(x,4) + q0
################## python conversion ended here ################################
# solve eq1 eq2 and eq3 to get the w and u functions
# displacement in w (z direction) from eq1,eq2,eq3
wfun = A11*q0*x**4 / (4*(6*B11**2-6*A11*D11)) + C1 + C2*x + C3*x**2 + C4*x**3 # C1 C2 C3 C4
# displacement in u (x direction) from eq1,eq2,eq3
ufun = B11*q0*x**3 / (6*(B11**2-A11*D11)) + C7 + x*C6 + 3*B11*x**2*C5/A11 # C5 C6 C7
# Cij6.evalf(subs={th:plyangle[i]}) * (z_[i+1]**3-z_[i]**3)
# cond1 -> w(0)=0 at x(0), roller
C1sol = sp.solve(wfun.subs(x,0), C1)[0] # = 0
# cond2 -> angle at dw/dx at x(0) is 0, cantilever
C2sol = sp.solve(wfun.diff(x).subs(x,0),C2)[0] # = 0
# cond3 -> w(z) = 0 at x(a), roller
C4sol1 = sp.solve(wfun.subs({x:a,C1:C1sol,C2:C2sol}),C4)[0] # C3
# cond4 u = 0 at x = 0
C7sol = sp.solve(ufun.subs(x,0),C7)[0] #=0
# u=0 at x = a
C5sol1 = sp.solve(ufun.subs({x:a, C7:C7sol}),C5)[0] #C6
# cond 5 EQ 4.4.14a Myy = 0 @ x(a) (Mxx , B11 D11) (Myy, B12 D12) roller no moment
C6sol1 = sp.solve( ( ((B11*ufun.diff(x)+0.5*wfun.diff(x)**2 ) - D11*wfun.diff(x,2)).subs({x:a, C1:C1sol, C2:C2sol, C4:C4sol1, C5:C5sol1, C7:C7sol})), C6)[0] # C6 C3
# EQ 4.4.13a, Nxx = 0 @ x(0) roller has no Nxx
C6sol2 = sp.solve( ((A11* ufun.diff(x) + 0.5*wfun.diff(x)**2)-B11*wfun.diff(x,2)).subs({x:a, C1:C1sol, C2:C2sol, C4:C4sol1, C5:C5sol1, C7:C7sol}),C6)[0] # C6 C3
C3sol = sp.solve(C6sol1 - C6sol2,C3)[0]
C4sol = C4sol1.subs(C3,C3sol)
C6sol = sp.simplify(C6sol2.subs(C3,C3sol))
C5sol = sp.simplify(C5sol1.subs(C6,C6sol))
# substitute integration constants with actual values( _ is actual number)
C1_ = copy(C1sol)
C2_ = copy(C2sol)
C7_ = copy(C7sol)
C3_ = C3sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
C4_ = C4sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
C5_ = C5sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
C6_ = C6sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
# function w(x) vertical displacement w along z with actual vaules
wsol = wfun.subs({q0:q0_, C1:C1_, C2:C2_, C3:C3_, C4:C4_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
# function u(x) horizontal displacement u along x with actual vaules
usol = ufun.subs({q0:q0_, C5:C5_, C6:C6_, C7:C7_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
# 3d plots
plot3d(wsol,(x,0,a), (y,0,b))
plt.xlabel('x')
plt.ylabel('y')
plt.title('Cylindrical Bending -Displacement of a plate With CLPT')
## Strain calculation
# eq 3.3.8 (pg 116 reddy (pdf = 138))
epstotal = array([[usol.diff(x) + 0.5*wsol.diff(x)**2 - z*wsol.diff(x,2)],[0],[0]])
epsx = epstotal[0,0]
## Calculating and plotting Stress in each layer
res = 8 # accuracy of finding max and min stress
xplot = linspace(0,a,res)
yplot = linspace(0,b,res)
G0 = sp.symbols('G0')
Globalminstress = np.zeros((3, nply))
Globalmaxstress = np.zeros((3, nply))
for kstress in range(3): # stress state s_x, s_y, s_xz
plt.figure(kstress+1)
for klay in range(nply): # loop through all layers
thplot = plyangle[klay]
zplot = linspace(z_[klay],z_[klay+1],res)
stressplot = np.zeros((len(zplot),len(xplot)))
## Calc Stresses
# stress in structural coordinates
# (Cij = reduced structural stiffness in structural coordinates, 3x3)
s_stress = Cij.subs(th,thplot) @ epstotal
if kstress == 2:
    # Shear stresses, recovered from equilibrium of the in-plane normal stress
    G0_ = -sp.integrate(s_stress[0].diff(x),z)+G0
    # solve for shear stresses from s_1
    s_xz = sp.solve(G0_,G0)[0]
    # out of plane shear S_xz does not need to be transformed ??
    plot3d(s_xz, (x,0, a), (z, z_[klay], z_[klay+1]) )
else:
    # normal stresses
    # stress in material coordinates
    m_stress = Tij.subs(th,thplot) @ s_stress
#ezsurf(m_stress(kstress),[0,a,z_(klay),z_(klay+1)])
## find max stress in each layer
ii=0
for i in xplot:
jj=0
for j in zplot:
if kstress == 2:
stressplot[ii,jj] = s_xz.subs({x:i, z:j})
else:
stressplot[ii,jj] = m_stress[kstress].subs({x:i, z:j})
jj += 1
ii += 1
Globalminstress[kstress,klay] = np.min(stressplot)
Globalmaxstress[kstress,klay] = np.max(stressplot)
#
plt.title(r'$\sigma_%i$' % kstress)
## Plot max stress and failure strength
plt.figure()
for i in range(3):
plt.subplot(1, 3, i+1)
plt.bar(range(nply), Globalmaxstress[i,:])
plt.bar(range(nply), Globalminstress[i,:])
plt.scatter(range(nply),np.ones(nply) * Strength[i,0])
plt.scatter(range(nply),np.ones(nply) * Strength[i,1])
plt.xlabel('layer')
plt.title(r'$\sigma_%i$' % i)
def plate_navier():
'''
composite plate bending with navier solution
TODO - code needs to be converted from matlab
'''
## Plate a*b*h simply supported under q = q0 CLPT
pass
'''
q0,a,b,m,n,x,y = sp.symbols('q0 a b m n x y')
Qmn = 4/(a*b)*sp.integrate( sp.integrate( q0*sp.sin(m*pi*x/a)*sp.sin(n*pi*y/b),(x,0,a)) ,(y,0,b))
dmn = pi**4 / b**4 * (DTij(1,1)*m**4*(b/a)**4 + 2* (DTij(1,2)+2*DTij(6,6)) *m**2*n**2*(b/a)**2 + DTij(2,2)*n**4)
Wmn = Qmn/dmn;
w0 = Wmn * sin(m*pi*x/a) * sin(n*pi*y/b);
w0_ = subs(w0,[q0 a b],[-q0_ a_ b_] );
figure
w0sum = 0;
for n_ = 1:10
for m_ = 1:10
w0sum = w0sum + subs(w0_,[n m],[n_ m_]);
end
end
w0sum;
% xplot = linspace(0,a_,res);
% yplot = linspace(0,b_,res);
ii=1;
for i = xplot
jj=1;
for j = yplot
w0plot(ii,jj) = subs(w0sum,[x y],[i j]);
jj=jj+1;
end
ii=ii+1;
end
surf(xplot,yplot,w0plot)
colorbar
set(gca,'PlotBoxAspectRatio',[2 1 1]);
xlabel('length a, u(x)')
ylabel('length b, v(y)')
zlabel('w(z)')
'''
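# A hedged Python sketch of the MATLAB Navier series quoted above, assuming
# the bending stiffnesses D11, D12, D22, D66 are passed in (the MATLAB DTij
# terms). For a uniform load only odd m,n contribute; the deflection is
# evaluated at the plate center.
def plate_navier_center_deflection(D11, D12, D22, D66, a, b, q0, terms=10):
    '''center deflection of a simply supported a*b plate under uniform q0'''
    w0 = 0.0
    for m in range(1, 2*terms, 2):
        for n in range(1, 2*terms, 2):
            Qmn = 16*q0/(pi**2*m*n)
            dmn = pi**4/b**4 * (D11*m**4*(b/a)**4
                                + 2*(D12 + 2*D66)*m**2*n**2*(b/a)**2
                                + D22*n**4)
            w0 += Qmn/dmn * sin(m*pi/2) * sin(n*pi/2)
    return w0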
class laminate(object):
"""
IN-WORK - laminate object for composite material analysis
"""
# constructor
def __init__(self, plyangle, matindex, matname):
# run when laminate is instantiated
# loads materials used
self.plyangle = plyangle
self.matindex = matindex
self.matname = matname
self.__mat = self.__import_matprops(matname)
# create a simple function to handle CTE properties
def __alphaf(self, mat):
return array([[mat.alpha1], [mat.alpha2], [0]])
self.laminatethk = array([self.__mat[matname[i]].plythk for i in matindex ])
self.nply = len(self.laminatethk) # number of plies
self.H = np.sum(self.laminatethk) # plate thickness
# area = a_width*H
z = zeros(self.nply+1)
zmid = zeros(self.nply)
z[0] = -self.H/2
for i in range(self.nply):
z[i+1] = z[i] + self.laminatethk[i]
zmid[i] = z[i] + self.laminatethk[i]/2
self.z = z
self.zmid = zmid
self.__abdmatrix()
def __Qf(self, E1,E2,nu12,G12):
'''transversely isotropic reduced stiffness matrix (plane stress). pg 58 herakovich
G12 = E1/(2*(1+nu12)) if isotropic'''
nu21 = E2*nu12/E1
Q = array([[E1/(1-nu12*nu21), E2*nu12/(1-nu12*nu21), 0],
[ E2*nu12/(1-nu12*nu21), E2/(1-nu12*nu21), 0],
[0, 0, G12]])
return Q
def __T1(self, th):
'''Stress Transform for Plane Stress
th=ply angle in degrees
Voigt notation for stress transform. sigma1 = T1 @ sigmax
recall T1(th)**-1 == T1(-th)'''
n = sin(th*pi/180)
m = cos(th*pi/180)
T1 = array( [[m**2, n**2, 2*m*n],
[n**2, m**2,-2*m*n],
[-m*n, m*n,(m**2-n**2)]])
return T1
def __T2(self, th):
'''Strain Transform for Plane Stress
th=ply angle in degrees
Voigt notation for strain transform. epsilon1 = T2 @ epsilonx'''
n = sin(th*pi/180)
m = cos(th*pi/180)
T2 = array( [[m**2, n**2, m*n],
[n**2, m**2,-m*n],
[-2*m*n, 2*m*n, (m**2-n**2)]])
return T2
# private method
def __abdmatrix(self):
'''used within the object but not accessible outside'''
#==========================================================================
# ABD Matrix Compute
#==========================================================================
# Reduced stiffness matrix for a plane stress ply in principal coordinates
# calculating Q from the compliance matrix may cause cancellation errors
A = zeros((3,3)); B = zeros((3,3)); D = zeros((3,3))
for i in range(self.nply): # = nply
Q = self.__Qf(self.__mat[self.matname[self.matindex[i]]].E1,
self.__mat[self.matname[self.matindex[i]]].E2,
self.__mat[self.matname[self.matindex[i]]].nu12,
self.__mat[self.matname[self.matindex[i]]].G12 )
Qbar = inv(self.__T1(self.plyangle[i])) @ Q @ self.__T2(self.plyangle[i]) # solve(T1(plyangle[i]), Q) @ T2(plyangle[i])
A += Qbar*(self.z[i+1]-self.z[i])
# coupling stiffness
B += (1/2)*Qbar*(self.z[i+1]**2-self.z[i]**2)
# bending or flexural laminate stiffness relating moments to curvatures
D += (1/3)*Qbar*(self.z[i+1]**3-self.z[i]**3)
# laminate stiffness matrix
ABD = zeros((6,6))
ABD[0:3,0:3] = A
ABD[0:3,3:6] = B
ABD[3:6,0:3] = B
ABD[3:6,3:6] = D
self.ABD = ABD
# method
def available_materials(self):
'''show the materials available in the library'''
matprops = pd.read_csv(os.path.join(os.path.dirname(__file__), "compositematerials.csv"), index_col=0)
print('---available materials---')
for k in matprops.columns.tolist():
print(k)
print('-------------------------')
# private method to be used internally
def __import_matprops(self, mymaterial=['T300_5208','AL_7075']):
'''
import material properties
'''
matprops = pd.read_csv(os.path.join(os.path.dirname(__file__), "compositematerials.csv"), index_col=0)
if mymaterial==[] or mymaterial=='':
print(matprops.columns.tolist())
mat = matprops[mymaterial]
#mat.applymap(lambda x:np.float(x))
mat = mat.applymap(lambda x:pd.to_numeric(x, errors='ignore'))
return mat
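# Example instantiation of the laminate object (the material name must exist
# as a column in compositematerials.csv; 'graphite-polymer_SI' is the one used
# in the commented examples at the bottom of this file):
#   lam = laminate(plyangle=[0, 45], matindex=[0, 0], matname=['graphite-polymer_SI'])
#   lam.ABD  # 6x6 laminate stiffness matrix, computed on construction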
def failure_envelope_laminate(Nx,Ny,Nxy,Mx,My,Mxy,q0,mymat,layup):
'''
find the minimum margin of safety for the given load conditions
'''
# evaluate a single-material laminate with the given layup and loads
_, FAILUREINDEX_MAXSTRESS_max = laminate_calcs(NM=[Nx,Ny,Nxy,Mx,My,Mxy],
ek=[0,0,0,0,0,0],
q0=q0,
plyangle= layup,
plymatindex=[0,0,0,0],
materials = [mymat],
platedim=[10,10],
zoffset=0,
SF=1.0,
plots=0,
prints=0)
return FAILUREINDEX_MAXSTRESS_max
def plot_single_max_failure_loads(mymat='E-Glass Epoxy fabric M10E-3783', mylayup=[0,45,45,0] ):
'''
loops through each load component, samples the failure index at small loads,
and extrapolates linearly to the root (ie margin=0)
an older version used the newton method for root finding:
scipy.optimize.newton(laminate_min, guess)
TODO: the current envelope is padded with random sample points. fix it
by using FI, the failure index, instead of margin to generate a
linear relationship and envelope
'''
#laminate_min = lambda N: failure_envelope_laminate(N,0,0,0,0,0,0)
loadnamelist = ['Nx','Ny','Nxy','Mx','My','Mxy','q0']
laminate_min_list = []
laminate_min_list.append(lambda N: failure_envelope_laminate(N,0,0,0,0,0,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,N,0,0,0,0,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,N,0,0,0,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,N,0,0,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,N,0,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,0,N,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,0,0,N,mymat,mylayup))
envelope_loads = []
N_t = array([0,1])
N_c = array([0,-1])
for loadname,laminate_min in zip(loadnamelist,laminate_min_list):
# tension
FI = [laminate_min(N) for N in N_t]
m = (FI[1]-FI[0]) / (N_t[1] - N_t[0])
b = FI[1]-m*N_t[1]
N_crit_t = (1-b) / m
# compression
FI = [laminate_min(N) for N in N_c]
m = (FI[1]-FI[0]) / (N_c[1] - N_c[0])
b = FI[1]-m*N_c[1]
N_crit_c = (1-b) / m
envelope_loads.append('{} = {:.1f} , {:.1f}'.format(loadname,N_crit_t, N_crit_c))
print('------------- enveloped loads for {} {} -----------------'.format(mylayup, mymat))
for k in envelope_loads:
print(k)
# plot envelope
Nx_env = []
Nxy_env = []
laminate_min = lambda N: failure_envelope_laminate(N,0,0,0,0,0,0,mymat,mylayup)
# compression
FI = [laminate_min(N) for N in N_c]
m = (FI[1]-FI[0]) / (N_c[1] - N_c[0])
b = FI[1]-m*N_c[1]
Nx_env.append( (1-b) / m )
Nxy_env.append( 0 )
# tension
FI = [laminate_min(N) for N in N_t]
m = (FI[1]-FI[0]) / (N_t[1] - N_t[0])
b = FI[1]-m*N_t[1]
Nx_env.append( (1-b) / m )
Nxy_env.append( 0 )
laminate_min = lambda N: failure_envelope_laminate(0,0,N,0,0,0,0,mymat,mylayup)
# compression
FI = [laminate_min(N) for N in N_c]
m = (FI[1]-FI[0]) / (N_c[1] - N_c[0])
b = FI[1]-m*N_c[1]
Nxy_env.append( (1-b) / m )
Nx_env.append( 0 )
# tension
FI = [laminate_min(N) for N in N_t]
m = (FI[1]-FI[0]) / (N_t[1] - N_t[0])
b = FI[1]-m*N_t[1]
Nxy_env.append( (1-b) / m )
Nx_env.append( 0 )
laminate_min_Nx_Nxy_func = lambda Nx,Nxy: failure_envelope_laminate(Nx,0,Nxy,0,0,0,0,mymat,mylayup)
n = 500
f = 1.25 # scale factor applied to the envelope corner loads when sampling
# arr1 = np.random.randint(Nx_env[0]-abs(Nx_env[0]*f),Nx_env[0]+abs(Nx_env[0])*f,n)
# arr2 = np.random.randint(Nx_env[1]-abs(Nx_env[1]*f),Nx_env[1]+abs(Nx_env[1])*f,n)
# Nx_r = np.concatenate((arr1, arr2))
#
# arr1 = np.random.randint(Nxy_env[2]-abs(Nxy_env[2])*f,Nxy_env[2]+abs(Nxy_env[2])*f,n)
# arr2 = np.random.randint(Nxy_env[3]-abs(Nxy_env[3])*f,Nxy_env[3]+abs(Nxy_env[3])*f,n)
# Nxy_r = np.concatenate((arr1, arr2))
Nx_r = np.random.randint(Nx_env[0]*f,Nx_env[1]*f, n)
Nxy_r = np.random.randint(Nxy_env[2]*f,Nxy_env[3]*f, n)
for Nx_ri, Nxy_ri in zip(Nx_r, Nxy_r):
FI = laminate_min_Nx_Nxy_func(Nx_ri, Nxy_ri)
if FI < 1:
Nx_env.append(Nx_ri)
Nxy_env.append(Nxy_ri)
points = array([ [x,xy] for x,xy in zip(Nx_env, Nxy_env)])
hull = scipy.spatial.ConvexHull(points)
plot(points[:,0], points[:,1], 'bo')
for simplex in hull.simplices:
plot(points[simplex, 0], points[simplex, 1], 'k-')
xlabel('Nx, lb/in')
ylabel('Nxy, lb/in')
title('Failure envelope')
return envelope_loads
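# The envelope construction above assumes the failure index is linear in the
# applied load, so the critical load solves FI = 1 on a line through two
# samples. A standalone sketch of that extrapolation step (helper name is
# illustrative):
def _critical_load_from_two_samples(N0, FI0, N1, FI1):
    m = (FI1 - FI0)/(N1 - N0)  # slope of FI vs N
    b = FI1 - m*N1             # intercept
    return (1 - b)/m           # load at which FI == 1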
def my_laminate_with_loading():
# loads lbs/in
Nx = 50
Ny = 0
Nxy = 0
Mx = 0
My = 0
Mxy = 0
q0 = 0 # pressure
# Qx = 0
# Qy = 0
a_width = 50
b_length = 3.14*6.75
## sandwich laminate
# plyangle= [45,45,0, 45,45],
# plymatindex=[0, 0, 1, 0, 0],
# create a 45 carbon cloth panel with a 0.5 inch rohacell core
laminate_calcs(NM=[Nx,Ny,Nxy,Mx,My,Mxy],
ek=[0,0,0,0,0,0],
q0=q0,
plyangle= [0,60,-60,-60,60,0],
plymatindex=[0,0,0,0,0,0],
materials = ['E-Glass Epoxy Uni'],
platedim=[a_width,b_length],
zoffset=0,
SF=2.0,
plots=0,
prints=1)
if __name__=='__main__':
#plot_single_max_failure_loads()
#plot_failure_index()
my_laminate_with_loading()
#material_plots(['E-Glass Epoxy fabric M10E-3783'])
#plate()
#plot_Nx_Nxy_failure_envelope(['Carbon_cloth_AGP3705H'])
#plot_single_max_failure_loads()
# # reload modules
# import importlib ; importlib.reload
# from composites import laminate
# plyangle = [0,45]
# matindex = [0,0]
# matname = ['graphite-polymer_SI']
# lam1 = laminate(plyangle, matindex, matname)
# lam1.ABD
| mit | 4,286,907,931,120,713,000 | 37.298794 | 238 | 0.518464 | false | 2.876906 | false | false | false |
StoneyJackson/unzipR | unzipr.py | 1 | 4368 | # unzipR - A library for recursively extracting files.
# Copyright (C) 2014 Stoney Jackson <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Unzipr determines a file's compression algorithm based on the file's
extension. Zip files are extracted into a directory with the same name as
the zip file minus its extension. So foo.zip is extracted into a directory
named foo.
New formats can be supported via Unzipr.registerUnzipFormat().
See installRarSupport() at the end of this file for an example.
'''
import pathlib
import shutil
import logging
logger = logging.getLogger(__name__)
def deleteZipFilesFromDirectoryRecursively(directory):
directory = pathlib.Path(directory)
for a_file in directory.iterdir():
if isZipFile(a_file):
a_file.unlink()
elif a_file.is_dir():
deleteZipFilesFromDirectoryRecursively(a_file)
def unzipFileRecursively(zipfile, toDir=None):
'''
If toDir is None, zipfile is extracted to a directory whose name is the same
as the zipfile's name minus its extensions.
'''
zipfile = pathlib.Path(zipfile)
toDir = unzipFile(zipfile, toDir)
unzipFilesInDirectoryRecursively(toDir)
return toDir
def unzipFilesInDirectoryRecursively(directory):
directory = pathlib.Path(directory)
for a_file in directory.iterdir():
logger.debug("processing " + str(a_file))
if isZipFile(a_file):
logger.debug("unzipping " + str(a_file))
unzipFileRecursively(a_file)
elif a_file.is_dir():
logger.debug("recursing " + str(a_file))
unzipFilesInDirectoryRecursively(a_file)
def unzipFile(zipfile, toDir=None):
'''
If toDir is None, zipfile is extracted to a directory whose name is the same
as the zipfile's name minus its extensions.
'''
zipfile = pathlib.Path(zipfile)
if toDir:
toDir = pathlib.Path(toDir)
else:
toDir = zipfile.parent / getFileNameWithoutExtension(zipfile)
shutil.unpack_archive(str(zipfile), str(toDir))
return toDir
def getFileNameWithoutExtension(theFile):
theFile = pathlib.Path(theFile)
extension = getFileExtension(theFile)
return theFile.name[:-len(extension)]
def isZipFile(zipfile):
zipfile = pathlib.Path(zipfile)
isZipFile = zipfile.is_file() and fileHasSupportedExtension(zipfile)
return isZipFile
def fileHasSupportedExtension(zipfile):
zipfile = pathlib.Path(zipfile)
extension = getFileExtension(zipfile)
return isSupportedExtension(extension)
def getFileExtension(theFile):
    theFile = pathlib.Path(theFile)
    if len(theFile.suffixes) >= 2:
        lastTwoSuffixes = ''.join(theFile.suffixes[-2:])
        if lastTwoSuffixes == '.tar.gz':
            return lastTwoSuffixes
    # fall through for multi-suffix files that are not .tar.gz
    # (e.g. 'a.backup.zip'), which previously returned None
    return theFile.suffix
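# Examples (illustrative):
#   getFileExtension(pathlib.Path('a.tar.gz'))      # -> '.tar.gz'
#   getFileExtension(pathlib.Path('a.zip'))         # -> '.zip'
#   getFileExtension(pathlib.Path('a.backup.zip'))  # -> '.zip'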
def isSupportedExtension(extension):
return extension in getSupportedExtensions()
def getSupportedExtensions():
supported_extensions = []
for format_ in shutil.get_unpack_formats():
supported_extensions += format_[1]
return supported_extensions
def registerUnzipFormat(name, extensions, function):
shutil.register_unpack_format(name, extensions, function)
def installRarSupport():
try:
import rarfile
def unrar(zipFile, toDir):
with rarfile.RarFile(zipFile) as rf:
rf.extractall(path=toDir)
registerUnzipFormat('rar', ['.rar'], unrar)
except ImportError:
pass
def install7zipSupport():
if shutil.which('7z'):
import subprocess
def un7zip(zipFile, toDir):
subprocess.call(['7z', 'x', str(zipFile), '-o' + str(toDir)])
registerUnzipFormat('7zip', ['.7z'], un7zip)
installRarSupport()
install7zipSupport()
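# Example usage (paths are illustrative):
#   unzipFileRecursively('downloads/foo.zip')      # extracts to downloads/foo
#   unzipFilesInDirectoryRecursively('downloads')  # walks and extracts in place
#   deleteZipFilesFromDirectoryRecursively('downloads')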
| gpl-3.0 | 7,436,011,763,455,285,000 | 31.117647 | 80 | 0.695971 | false | 3.903485 | false | false | false |
aio-libs/aioredis | aioredis/connection.py | 1 | 59561 | import asyncio
import errno
import inspect
import io
import os
import socket
import ssl
import threading
import time
import warnings
from distutils.version import StrictVersion
from itertools import chain
from typing import (
Any,
Iterable,
List,
Mapping,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from urllib.parse import ParseResult, parse_qs, unquote, urlparse
import async_timeout
from .compat import Protocol, TypedDict
from .exceptions import (
AuthenticationError,
AuthenticationWrongNumberOfArgsError,
BusyLoadingError,
ChildDeadlockedError,
ConnectionError,
DataError,
ExecAbortError,
InvalidResponse,
ModuleError,
NoPermissionError,
NoScriptError,
ReadOnlyError,
RedisError,
ResponseError,
TimeoutError,
)
from .utils import str_if_bytes
NONBLOCKING_EXCEPTION_ERROR_NUMBERS = {
BlockingIOError: errno.EWOULDBLOCK,
ssl.SSLWantReadError: 2,
ssl.SSLWantWriteError: 2,
ssl.SSLError: 2,
}
NONBLOCKING_EXCEPTIONS = tuple(NONBLOCKING_EXCEPTION_ERROR_NUMBERS.keys())
try:
import hiredis
except (ImportError, ModuleNotFoundError):
HIREDIS_AVAILABLE = False
else:
HIREDIS_AVAILABLE = True
hiredis_version = StrictVersion(hiredis.__version__)
if hiredis_version < StrictVersion("1.0.0"):
warnings.warn(
"aioredis supports hiredis @ 1.0.0 or higher. "
f"You have hiredis @ {hiredis.__version__}. "
"Pure-python parser will be used instead."
)
HIREDIS_AVAILABLE = False
SYM_STAR = b"*"
SYM_DOLLAR = b"$"
SYM_CRLF = b"\r\n"
SYM_LF = b"\n"
SYM_EMPTY = b""
SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."
SENTINEL = object()
MODULE_LOAD_ERROR = "Error loading the extension. Please check the server logs."
NO_SUCH_MODULE_ERROR = "Error unloading module: no such module with that name"
MODULE_UNLOAD_NOT_POSSIBLE_ERROR = "Error unloading module: operation not possible."
MODULE_EXPORTS_DATA_TYPES_ERROR = (
"Error unloading module: the module "
"exports one or more module-side data "
"types, can't unload"
)
EncodedT = Union[bytes, memoryview]
DecodedT = Union[str, int, float]
EncodableT = Union[EncodedT, DecodedT, None]
class Encoder:
"""Encode strings to bytes-like and decode bytes-like to strings"""
__slots__ = "encoding", "encoding_errors", "decode_responses"
def __init__(self, encoding: str, encoding_errors: str, decode_responses: bool):
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
def encode(self, value: EncodableT) -> EncodedT:
"""Return a bytestring or bytes-like representation of the value"""
if isinstance(value, (bytes, memoryview)):
return value
if isinstance(value, bool):
# special case bool since it is a subclass of int
raise DataError(
"Invalid input of type: 'bool'. "
"Convert to a bytes, string, int or float first."
)
if isinstance(value, (int, float)):
return repr(value).encode()
if not isinstance(value, str):
# a value we don't know how to deal with. throw an error
typename = value.__class__.__name__
raise DataError(
f"Invalid input of type: {typename!r}. "
"Convert to a bytes, string, int or float first."
)
if isinstance(value, str):
return value.encode(self.encoding, self.encoding_errors)
return value
def decode(self, value: EncodableT, force=False) -> EncodableT:
"""Return a unicode string from the bytes-like representation"""
if self.decode_responses or force:
if isinstance(value, memoryview):
return value.tobytes().decode(self.encoding, self.encoding_errors)
if isinstance(value, bytes):
return value.decode(self.encoding, self.encoding_errors)
return value
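# Illustrative sketch only (added for exposition, not part of the upstream
# module): the round-trip behaviour implied by Encoder.encode()/decode().
def _encoder_example():  # pragma: no cover
    enc = Encoder(encoding="utf-8", encoding_errors="strict", decode_responses=True)
    assert enc.encode("hello") == b"hello"
    assert enc.encode(42) == b"42"          # ints/floats become their repr() bytes
    assert enc.decode(b"world") == "world"  # decoded because decode_responses=True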
ExceptionMappingT = Mapping[str, Union[Type[Exception], Mapping[str, Type[Exception]]]]
class BaseParser:
"""Plain Python parsing class"""
__slots__ = "_stream", "_buffer", "_read_size"
EXCEPTION_CLASSES: ExceptionMappingT = {
"ERR": {
"max number of clients reached": ConnectionError,
"Client sent AUTH, but no password is set": AuthenticationError,
"invalid password": AuthenticationError,
# some Redis server versions report invalid command syntax
# in lowercase
"wrong number of arguments for 'auth' command": AuthenticationWrongNumberOfArgsError,
# some Redis server versions report invalid command syntax
# in uppercase
"wrong number of arguments for 'AUTH' command": AuthenticationWrongNumberOfArgsError,
MODULE_LOAD_ERROR: ModuleError,
MODULE_EXPORTS_DATA_TYPES_ERROR: ModuleError,
NO_SUCH_MODULE_ERROR: ModuleError,
MODULE_UNLOAD_NOT_POSSIBLE_ERROR: ModuleError,
},
"EXECABORT": ExecAbortError,
"LOADING": BusyLoadingError,
"NOSCRIPT": NoScriptError,
"READONLY": ReadOnlyError,
"NOAUTH": AuthenticationError,
"NOPERM": NoPermissionError,
}
def __init__(self, socket_read_size: int):
self._stream: Optional[asyncio.StreamReader] = None
self._buffer: Optional[SocketBuffer] = None
self._read_size = socket_read_size
def __del__(self):
try:
self.on_disconnect()
except Exception:
pass
def parse_error(self, response: str) -> ResponseError:
"""Parse an error response"""
error_code = response.split(" ")[0]
if error_code in self.EXCEPTION_CLASSES:
response = response[len(error_code) + 1 :]
exception_class = self.EXCEPTION_CLASSES[error_code]
if isinstance(exception_class, dict):
exception_class = exception_class.get(response, ResponseError)
return exception_class(response)
return ResponseError(response)
def on_disconnect(self):
raise NotImplementedError()
def on_connect(self, connection: "Connection"):
raise NotImplementedError()
async def can_read(self, timeout: float) -> bool:
raise NotImplementedError()
async def read_response(self) -> Union[EncodableT, ResponseError, None]:
raise NotImplementedError()
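# Illustrative sketch only: how parse_error() dispatches through the
# EXCEPTION_CLASSES table above. The error string is a made-up server reply.
def _parse_error_example():  # pragma: no cover
    parser = BaseParser(socket_read_size=65536)
    err = parser.parse_error("NOSCRIPT No matching script. Please use EVAL.")
    assert isinstance(err, NoScriptError)
    assert str(err) == "No matching script. Please use EVAL."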
class SocketBuffer:
"""Async-friendly re-impl of redis-py's SocketBuffer.
TODO: We're currently passing through two buffers,
the asyncio.StreamReader and this. I imagine we can reduce the layers here
while maintaining compliance with prior art.
"""
def __init__(
self,
stream_reader: asyncio.StreamReader,
socket_read_size: int,
socket_timeout: float,
):
self._stream = stream_reader
self.socket_read_size = socket_read_size
self.socket_timeout = socket_timeout
self._buffer = io.BytesIO()
# number of bytes written to the buffer from the socket
self.bytes_written = 0
# number of bytes read from the buffer
self.bytes_read = 0
@property
def length(self):
return self.bytes_written - self.bytes_read
async def _read_from_socket(
self,
length: int = None,
timeout: Optional[float] = SENTINEL, # type: ignore
raise_on_timeout: bool = True,
) -> bool:
buf = self._buffer
buf.seek(self.bytes_written)
marker = 0
timeout = timeout if timeout is not SENTINEL else self.socket_timeout
try:
while True:
async with async_timeout.timeout(timeout):
data = await self._stream.read(self.socket_read_size)
# an empty string indicates the server shutdown the socket
if isinstance(data, bytes) and len(data) == 0:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
buf.write(data)
data_length = len(data)
self.bytes_written += data_length
marker += data_length
if length is not None and length > marker:
continue
return True
except (socket.timeout, asyncio.TimeoutError):
if raise_on_timeout:
raise TimeoutError("Timeout reading from socket")
return False
except NONBLOCKING_EXCEPTIONS as ex:
# if we're in nonblocking mode and the recv raises a
# blocking error, simply return False indicating that
# there's no data to be read. otherwise raise the
# original exception.
allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
if not raise_on_timeout and ex.errno == allowed:
return False
raise ConnectionError(f"Error while reading from socket: {ex.args}")
async def can_read(self, timeout: float) -> bool:
return bool(self.length) or await self._read_from_socket(
timeout=timeout, raise_on_timeout=False
)
async def read(self, length: int) -> bytes:
length = length + 2 # make sure to read the \r\n terminator
# make sure we've read enough data from the socket
if length > self.length:
await self._read_from_socket(length - self.length)
self._buffer.seek(self.bytes_read)
data = self._buffer.read(length)
self.bytes_read += len(data)
# purge the buffer when we've consumed it all so it doesn't
# grow forever
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
async def readline(self) -> bytes:
buf = self._buffer
buf.seek(self.bytes_read)
data = buf.readline()
while not data.endswith(SYM_CRLF):
# there's more data in the socket that we need
await self._read_from_socket()
buf.seek(self.bytes_read)
data = buf.readline()
self.bytes_read += len(data)
# purge the buffer when we've consumed it all so it doesn't
# grow forever
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
def purge(self):
self._buffer.seek(0)
self._buffer.truncate()
self.bytes_written = 0
self.bytes_read = 0
def close(self):
try:
self.purge()
self._buffer.close()
except Exception:
# issue #633 suggests the purge/close somehow raised a
# BadFileDescriptor error. Perhaps the client ran out of
# memory or something else? It's probably OK to ignore
# any error being raised from purge/close since we're
# removing the reference to the instance below.
pass
self._buffer = None
self._stream = None
class PythonParser(BaseParser):
"""Plain Python parsing class"""
__slots__ = BaseParser.__slots__ + ("encoder",)
def __init__(self, socket_read_size: int):
super().__init__(socket_read_size)
self.encoder: Optional[Encoder] = None
def on_connect(self, connection: "Connection"):
"""Called when the stream connects"""
self._stream = connection._reader
self._buffer = SocketBuffer(
self._stream, self._read_size, connection.socket_timeout
)
self.encoder = connection.encoder
def on_disconnect(self):
"""Called when the stream disconnects"""
if self._stream is not None:
self._stream = None
if self._buffer is not None:
self._buffer.close()
self._buffer = None
self.encoder = None
async def can_read(self, timeout: float):
return self._buffer and bool(await self._buffer.can_read(timeout))
async def read_response(self) -> Union[EncodableT, ResponseError, None]:
if not self._buffer:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
raw = await self._buffer.readline()
if not raw:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
response: Any
byte, response = raw[:1], raw[1:]
if byte not in (b"-", b"+", b":", b"$", b"*"):
raise InvalidResponse(f"Protocol Error: {raw!r}")
# server returned an error
if byte == b"-":
response = response.decode("utf-8", errors="replace")
error = self.parse_error(response)
# if the error is a ConnectionError, raise immediately so the user
# is notified
if isinstance(error, ConnectionError):
raise error
# otherwise, we're dealing with a ResponseError that might belong
# inside a pipeline response. the connection's read_response()
# and/or the pipeline's execute() will raise this error if
# necessary, so just return the exception instance here.
return error
# single value
elif byte == b"+":
pass
# int value
elif byte == b":":
response = int(response)
# bulk response
elif byte == b"$":
length = int(response)
if length == -1:
return None
response = await self._buffer.read(length)
# multi-bulk response
elif byte == b"*":
length = int(response)
if length == -1:
return None
response = [(await self.read_response()) for i in range(length)]
if isinstance(response, bytes):
response = self.encoder.decode(response)
return response
class HiredisParser(BaseParser):
"""Parser class for connections using Hiredis"""
__slots__ = BaseParser.__slots__ + ("_next_response", "_reader", "_socket_timeout")
def __init__(self, socket_read_size: int):
if not HIREDIS_AVAILABLE:
raise RedisError("Hiredis is not available.")
super().__init__(socket_read_size=socket_read_size)
        self._next_response = False  # no pending parsed reply
self._reader: Optional[hiredis.Reader] = None
self._socket_timeout: Optional[float] = None
def on_connect(self, connection: "Connection"):
self._stream = connection._reader
kwargs = {
"protocolError": InvalidResponse,
"replyError": self.parse_error,
}
if connection.encoder.decode_responses:
kwargs.update(
encoding=connection.encoder.encoding,
errors=connection.encoder.encoding_errors,
)
self._reader = hiredis.Reader(**kwargs)
self._next_response = False
self._socket_timeout = connection.socket_timeout
def on_disconnect(self):
self._stream = None
self._reader = None
self._next_response = False
async def can_read(self, timeout: float):
if not self._reader:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
if self._next_response is False:
self._next_response = self._reader.gets()
if self._next_response is False:
return await self.read_from_socket(timeout=timeout, raise_on_timeout=False)
return True
async def read_from_socket(
self, timeout: Optional[float] = SENTINEL, raise_on_timeout: bool = True
):
timeout = self._socket_timeout if timeout is SENTINEL else timeout
try:
async with async_timeout.timeout(timeout):
buffer = await self._stream.read(self._read_size)
if not isinstance(buffer, bytes) or len(buffer) == 0:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None
self._reader.feed(buffer)
# data was read from the socket and added to the buffer.
# return True to indicate that data was read.
return True
except asyncio.CancelledError:
raise
except (socket.timeout, asyncio.TimeoutError):
if raise_on_timeout:
raise TimeoutError("Timeout reading from socket") from None
return False
except NONBLOCKING_EXCEPTIONS as ex:
# if we're in nonblocking mode and the recv raises a
# blocking error, simply return False indicating that
# there's no data to be read. otherwise raise the
# original exception.
allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
if not raise_on_timeout and ex.errno == allowed:
return False
raise ConnectionError(f"Error while reading from socket: {ex.args}")
async def read_response(self) -> EncodableT:
if not self._stream or not self._reader:
self.on_disconnect()
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None
# _next_response might be cached from a can_read() call
if self._next_response is not False:
response = self._next_response
self._next_response = False
return response
response = self._reader.gets()
while response is False:
await self.read_from_socket()
response = self._reader.gets()
# if the response is a ConnectionError or the response is a list and
# the first item is a ConnectionError, raise it as something bad
# happened
if isinstance(response, ConnectionError):
raise response
elif (
isinstance(response, list)
and response
and isinstance(response[0], ConnectionError)
):
raise response[0]
return response
DefaultParser: Type[Union[PythonParser, HiredisParser]]
if HIREDIS_AVAILABLE:
DefaultParser = HiredisParser
else:
DefaultParser = PythonParser
class ConnectCallbackProtocol(Protocol):
def __call__(self, connection: "Connection"):
...
class AsyncConnectCallbackProtocol(Protocol):
async def __call__(self, connection: "Connection"):
...
ConnectCallbackT = Union[ConnectCallbackProtocol, AsyncConnectCallbackProtocol]
class Connection:
"""Manages TCP communication to and from a Redis server"""
__slots__ = (
"pid",
"host",
"port",
"db",
"username",
"client_name",
"password",
"socket_timeout",
"socket_connect_timeout",
"socket_keepalive",
"socket_keepalive_options",
"socket_type",
"retry_on_timeout",
"health_check_interval",
"next_health_check",
"last_active_at",
"encoder",
"ssl_context",
"_reader",
"_writer",
"_parser",
"_connect_callbacks",
"_buffer_cutoff",
"_loop",
"__dict__",
)
def __init__(
self,
*,
host: str = "localhost",
port: Union[str, int] = 6379,
db: Union[str, int] = 0,
password: str = None,
socket_timeout: float = None,
socket_connect_timeout: float = None,
socket_keepalive: bool = False,
socket_keepalive_options: dict = None,
socket_type: int = 0,
retry_on_timeout: bool = False,
encoding: str = "utf-8",
encoding_errors: str = "strict",
decode_responses: bool = False,
parser_class: Type[BaseParser] = DefaultParser,
socket_read_size: int = 65536,
health_check_interval: int = 0,
client_name: str = None,
username: str = None,
encoder_class: Type[Encoder] = Encoder,
loop: asyncio.AbstractEventLoop = None,
):
self.pid = os.getpid()
self.host = host
self.port = int(port)
self.db = db
self.username = username
self.client_name = client_name
self.password = password
self.socket_timeout = socket_timeout
self.socket_connect_timeout = socket_connect_timeout or socket_timeout or None
self.socket_keepalive = socket_keepalive
self.socket_keepalive_options = socket_keepalive_options or {}
self.socket_type = socket_type
self.retry_on_timeout = retry_on_timeout
self.health_check_interval = health_check_interval
self.next_health_check = 0
self.ssl_context: Optional[RedisSSLContext] = None
self.encoder = encoder_class(encoding, encoding_errors, decode_responses)
self._reader: Optional[asyncio.StreamReader] = None
self._writer: Optional[asyncio.StreamWriter] = None
self._parser = parser_class(
socket_read_size=socket_read_size,
)
self._connect_callbacks: List[ConnectCallbackT] = []
self._buffer_cutoff = 6000
self._loop = loop
def __repr__(self):
repr_args = ",".join((f"{k}={v}" for k, v in self.repr_pieces()))
return f"{self.__class__.__name__}<{repr_args}>"
def repr_pieces(self):
pieces = [("host", self.host), ("port", self.port), ("db", self.db)]
if self.client_name:
pieces.append(("client_name", self.client_name))
return pieces
def __del__(self):
try:
if self.is_connected:
loop = self._loop or asyncio.get_event_loop()
coro = self.disconnect()
if loop.is_running():
loop.create_task(coro)
else:
loop.run_until_complete(self.disconnect())
except Exception:
pass
@property
def is_connected(self):
return bool(self._reader and self._writer)
def register_connect_callback(self, callback):
self._connect_callbacks.append(callback)
def clear_connect_callbacks(self):
self._connect_callbacks = []
async def connect(self):
"""Connects to the Redis server if not already connected"""
if self.is_connected:
return
try:
await self._connect()
except asyncio.CancelledError:
raise
except (socket.timeout, asyncio.TimeoutError):
raise TimeoutError("Timeout connecting to server")
except OSError as e:
raise ConnectionError(self._error_message(e))
except Exception as exc:
raise ConnectionError(exc) from exc
try:
await self.on_connect()
except RedisError:
# clean up after any error in on_connect
await self.disconnect()
raise
# run any user callbacks. right now the only internal callback
# is for pubsub channel/pattern resubscription
for callback in self._connect_callbacks:
task = callback(self)
if task and inspect.isawaitable(task):
await task
async def _connect(self):
"""Create a TCP socket connection"""
        async with async_timeout.timeout(self.socket_connect_timeout):
            reader, writer = await asyncio.open_connection(
                host=self.host,
                port=self.port,
                # RedisSSLContext is a lazy wrapper; asyncio needs a real SSLContext
                ssl=self.ssl_context.get() if self.ssl_context else None,
                loop=self._loop,
            )
self._reader = reader
self._writer = writer
sock = writer.transport.get_extra_info("socket")
if sock is not None:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
try:
# TCP_KEEPALIVE
if self.socket_keepalive:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
for k, v in self.socket_keepalive_options.items():
sock.setsockopt(socket.SOL_TCP, k, v)
# set the socket_timeout now that we're connected
if self.socket_timeout is not None:
sock.settimeout(self.socket_timeout)
except (OSError, TypeError):
# `socket_keepalive_options` might contain invalid options
# causing an error. Do not leave the connection open.
writer.close()
raise
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return f"Error connecting to {self.host}:{self.port}. {exception.args[0]}."
else:
            return (
                f"Error {exception.args[0]} connecting to {self.host}:{self.port}. "
                f"{exception.args[1]}."
            )
async def on_connect(self):
"""Initialize the connection, authenticate and select a database"""
self._parser.on_connect(self)
# if username and/or password are set, authenticate
if self.username or self.password:
if self.username:
auth_args = (self.username, self.password or "")
else:
auth_args = (self.password,)
# avoid checking health here -- PING will fail if we try
# to check the health prior to the AUTH
await self.send_command("AUTH", *auth_args, check_health=False)
try:
auth_response = await self.read_response()
except AuthenticationWrongNumberOfArgsError:
# a username and password were specified but the Redis
# server seems to be < 6.0.0 which expects a single password
# arg. retry auth with just the password.
# https://github.com/andymccurdy/redis-py/issues/1274
await self.send_command("AUTH", self.password, check_health=False)
auth_response = await self.read_response()
if str_if_bytes(auth_response) != "OK":
raise AuthenticationError("Invalid Username or Password")
# if a client_name is given, set it
if self.client_name:
await self.send_command("CLIENT", "SETNAME", self.client_name)
            if str_if_bytes(await self.read_response()) != "OK":
raise ConnectionError("Error setting client name")
# if a database is specified, switch to it
if self.db:
await self.send_command("SELECT", self.db)
if str_if_bytes(await self.read_response()) != "OK":
raise ConnectionError("Invalid Database")
async def disconnect(self):
"""Disconnects from the Redis server"""
try:
async with async_timeout.timeout(self.socket_connect_timeout):
self._parser.on_disconnect()
if not self.is_connected:
return
try:
if os.getpid() == self.pid:
self._writer.close()
# py3.6 doesn't have this method
if hasattr(self._writer, "wait_closed"):
await self._writer.wait_closed()
except OSError:
pass
self._reader = None
self._writer = None
except asyncio.TimeoutError:
raise TimeoutError(
f"Timed out closing connection after {self.socket_connect_timeout}"
) from None
async def check_health(self):
"""Check the health of the connection with a PING/PONG"""
if self.health_check_interval and time.time() > self.next_health_check:
try:
await self.send_command("PING", check_health=False)
if str_if_bytes(await self.read_response()) != "PONG":
raise ConnectionError("Bad response from PING health check")
except (ConnectionError, TimeoutError) as err:
await self.disconnect()
try:
await self.send_command("PING", check_health=False)
if str_if_bytes(await self.read_response()) != "PONG":
raise ConnectionError(
"Bad response from PING health check"
) from None
except BaseException as err2:
raise err2 from err
async def send_packed_command(
self,
command: Union[bytes, str, Iterable[Union[bytes, str]]],
check_health: bool = True,
):
"""Send an already packed command to the Redis server"""
if not self._writer:
await self.connect()
# guard against health check recursion
if check_health:
await self.check_health()
try:
if isinstance(command, str):
command = command.encode()
if isinstance(command, bytes):
command = [command]
self._writer.writelines(command)
await self._writer.drain()
except asyncio.TimeoutError:
await self.disconnect()
raise TimeoutError("Timeout writing to socket") from None
except OSError as e:
await self.disconnect()
if len(e.args) == 1:
errno, errmsg = "UNKNOWN", e.args[0]
else:
errno = e.args[0]
errmsg = e.args[1]
raise ConnectionError(
f"Error {errno} while writing to socket. {errmsg}."
) from e
except BaseException:
await self.disconnect()
raise
async def send_command(self, *args, **kwargs):
"""Pack and send a command to the Redis server"""
if not self.is_connected:
await self.connect()
await self.send_packed_command(
self.pack_command(*args), check_health=kwargs.get("check_health", True)
)
async def can_read(self, timeout: float = 0):
"""Poll the socket to see if there's data that can be read."""
if not self.is_connected:
await self.connect()
return await self._parser.can_read(timeout)
async def read_response(self):
"""Read the response from a previously sent command"""
try:
            async with async_timeout.timeout(self.socket_timeout):
response = await self._parser.read_response()
except asyncio.TimeoutError:
await self.disconnect()
raise TimeoutError(f"Timeout reading from {self.host}:{self.port}")
except BaseException:
await self.disconnect()
raise
if self.health_check_interval:
self.next_health_check = time.time() + self.health_check_interval
if isinstance(response, ResponseError):
raise response from None
return response
def pack_command(self, *args: EncodableT) -> List[bytes]:
"""Pack a series of arguments into the Redis protocol"""
output = []
# the client might have included 1 or more literal arguments in
# the command name, e.g., 'CONFIG GET'. The Redis server expects these
# arguments to be sent separately, so split the first argument
# manually. These arguments should be bytestrings so that they are
# not encoded.
if isinstance(args[0], str):
args = tuple(args[0].encode().split()) + args[1:]
elif b" " in args[0]:
args = tuple(args[0].split()) + args[1:]
buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF))
buffer_cutoff = self._buffer_cutoff
for arg in map(self.encoder.encode, args):
# to avoid large string mallocs, chunk the command into the
# output list if we're sending large values or memoryviews
arg_length = len(arg)
if (
len(buff) > buffer_cutoff
or arg_length > buffer_cutoff
or isinstance(arg, memoryview)
):
buff = SYM_EMPTY.join(
(buff, SYM_DOLLAR, str(arg_length).encode(), SYM_CRLF)
)
output.append(buff)
output.append(arg)
buff = SYM_CRLF
else:
buff = SYM_EMPTY.join(
(
buff,
SYM_DOLLAR,
str(arg_length).encode(),
SYM_CRLF,
arg,
SYM_CRLF,
)
)
output.append(buff)
return output
def pack_commands(self, commands: Iterable[Iterable[EncodableT]]) -> List[bytes]:
"""Pack multiple commands into the Redis protocol"""
output: List[bytes] = []
pieces: List[bytes] = []
buffer_length = 0
buffer_cutoff = self._buffer_cutoff
for cmd in commands:
for chunk in self.pack_command(*cmd):
chunklen = len(chunk)
if (
buffer_length > buffer_cutoff
or chunklen > buffer_cutoff
or isinstance(chunk, memoryview)
):
output.append(SYM_EMPTY.join(pieces))
buffer_length = 0
pieces = []
if chunklen > buffer_cutoff or isinstance(chunk, memoryview):
output.append(chunk)
else:
pieces.append(chunk)
buffer_length += chunklen
if pieces:
output.append(SYM_EMPTY.join(pieces))
return output
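# Illustrative sketch only: the RESP framing produced by pack_command() above.
# Connection.__init__ does not open a socket, so this is safe to run offline.
def _pack_command_example():  # pragma: no cover
    conn = Connection()
    wire = b"".join(conn.pack_command("SET", "foo", "bar"))
    assert wire == b"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n"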
class SSLConnection(Connection):
def __init__(
self,
ssl_keyfile: str = None,
ssl_certfile: str = None,
ssl_cert_reqs: str = "required",
ssl_ca_certs: str = None,
ssl_check_hostname: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.ssl_context = RedisSSLContext(
keyfile=ssl_keyfile,
certfile=ssl_certfile,
cert_reqs=ssl_cert_reqs,
ca_certs=ssl_ca_certs,
check_hostname=ssl_check_hostname,
)
@property
def keyfile(self):
return self.ssl_context.keyfile
@property
def certfile(self):
return self.ssl_context.certfile
@property
def cert_reqs(self):
return self.ssl_context.cert_reqs
@property
def ca_certs(self):
return self.ssl_context.ca_certs
@property
def check_hostname(self):
return self.ssl_context.check_hostname
class RedisSSLContext:
__slots__ = (
"keyfile",
"certfile",
"cert_reqs",
"ca_certs",
"context",
"check_hostname",
)
def __init__(
self,
keyfile: str = None,
certfile: str = None,
cert_reqs: str = None,
ca_certs: str = None,
check_hostname: bool = False,
):
self.keyfile = keyfile
self.certfile = certfile
if cert_reqs is None:
self.cert_reqs = ssl.CERT_NONE
elif isinstance(cert_reqs, str):
CERT_REQS = {
"none": ssl.CERT_NONE,
"optional": ssl.CERT_OPTIONAL,
"required": ssl.CERT_REQUIRED,
}
if cert_reqs not in CERT_REQS:
raise RedisError(
"Invalid SSL Certificate Requirements Flag: %s" % cert_reqs
)
self.cert_reqs = CERT_REQS[cert_reqs]
self.ca_certs = ca_certs
self.check_hostname = check_hostname
self.context = None
def get(self) -> ssl.SSLContext:
if not self.context:
context = ssl.create_default_context()
context.check_hostname = self.check_hostname
context.verify_mode = self.cert_reqs
if self.certfile and self.keyfile:
context.load_cert_chain(certfile=self.certfile, keyfile=self.keyfile)
if self.ca_certs:
context.load_verify_locations(self.ca_certs)
self.context = context
return self.context
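# Illustrative sketch only: RedisSSLContext lazily builds a real
# ssl.SSLContext the first time get() is called.
def _ssl_context_example():  # pragma: no cover
    wrapper = RedisSSLContext(cert_reqs="none")
    ctx = wrapper.get()
    assert isinstance(ctx, ssl.SSLContext)
    assert ctx.verify_mode == ssl.CERT_NONE
    assert wrapper.get() is ctx  # cached on subsequent calls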
class UnixDomainSocketConnection(Connection): # lgtm [py/missing-call-to-init]
def __init__(
self,
*,
path: str = "",
db: Union[str, int] = 0,
username: str = None,
password: str = None,
socket_timeout: float = None,
encoding: str = "utf-8",
encoding_errors: str = "strict",
decode_responses: bool = False,
retry_on_timeout: bool = False,
parser_class: Type[BaseParser] = DefaultParser,
socket_read_size: int = 65536,
health_check_interval: float = 0.0,
client_name=None,
loop: asyncio.AbstractEventLoop = None,
):
self.pid = os.getpid()
self.path = path
self.db = db
self.username = username
self.client_name = client_name
self.password = password
        self.socket_timeout = socket_timeout
        # unix sockets have no separate connect timeout; reuse socket_timeout
        # (disconnect() and _connect() both rely on this attribute existing)
        self.socket_connect_timeout = socket_timeout
self.retry_on_timeout = retry_on_timeout
self.health_check_interval = health_check_interval
self.next_health_check = 0
self.encoder = Encoder(encoding, encoding_errors, decode_responses)
        self._sock = None
        self._reader = None
        self._writer = None
self._parser = parser_class(socket_read_size=socket_read_size)
self._connect_callbacks = []
self._buffer_cutoff = 6000
self._loop = loop
def repr_pieces(self) -> Iterable[Tuple[str, Union[str, int]]]:
pieces = [
("path", self.path),
("db", self.db),
]
if self.client_name:
pieces.append(("client_name", self.client_name))
return pieces
async def _connect(self):
        async with async_timeout.timeout(self.socket_connect_timeout):
reader, writer = await asyncio.open_unix_connection(path=self.path)
self._reader = reader
self._writer = writer
await self.on_connect()
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return f"Error connecting to unix socket: {self.path}. {exception.args[0]}."
else:
return (
f"Error {exception.args[0]} connecting to unix socket: "
f"{self.path}. {exception.args[1]}."
)
FALSE_STRINGS = ("0", "F", "FALSE", "N", "NO")
def to_bool(value) -> Optional[bool]:
if value is None or value == "":
return None
if isinstance(value, str) and value.upper() in FALSE_STRINGS:
return False
return bool(value)
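# Illustrative sketch only: the querystring boolean convention used below.
def _to_bool_example():  # pragma: no cover
    assert to_bool("No") is False      # matched against FALSE_STRINGS
    assert to_bool("1") is True
    assert to_bool("") is None         # empty values mean "not specified"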
URL_QUERY_ARGUMENT_PARSERS = {
"db": int,
"socket_timeout": float,
"socket_connect_timeout": float,
"socket_keepalive": to_bool,
"retry_on_timeout": to_bool,
"max_connections": int,
"health_check_interval": int,
"ssl_check_hostname": to_bool,
}
class ConnectKwargs(TypedDict, total=False):
username: str
password: str
connection_class: Type[Connection]
host: str
port: int
db: int
def parse_url(url: str) -> ConnectKwargs:
parsed: ParseResult = urlparse(url)
kwargs: ConnectKwargs = {}
for name, value in parse_qs(parsed.query).items():
if value and len(value) > 0:
value = unquote(value[0])
parser = URL_QUERY_ARGUMENT_PARSERS.get(name)
if parser:
try:
kwargs[name] = parser(value)
except (TypeError, ValueError):
raise ValueError("Invalid value for `%s` in connection URL." % name)
else:
kwargs[name] = value
if parsed.username:
kwargs["username"] = unquote(parsed.username)
if parsed.password:
kwargs["password"] = unquote(parsed.password)
# We only support redis://, rediss:// and unix:// schemes.
if parsed.scheme == "unix":
if parsed.path:
kwargs["path"] = unquote(parsed.path)
kwargs["connection_class"] = UnixDomainSocketConnection
elif parsed.scheme in ("redis", "rediss"):
if parsed.hostname:
kwargs["host"] = unquote(parsed.hostname)
if parsed.port:
kwargs["port"] = int(parsed.port)
# If there's a path argument, use it as the db argument if a
# querystring value wasn't specified
if parsed.path and "db" not in kwargs:
try:
kwargs["db"] = int(unquote(parsed.path).replace("/", ""))
except (AttributeError, ValueError):
pass
if parsed.scheme == "rediss":
kwargs["connection_class"] = SSLConnection
else:
valid_schemes = "redis://, rediss://, unix://"
raise ValueError(
"Redis URL must specify one of the following "
"schemes (%s)" % valid_schemes
)
return kwargs
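# Illustrative sketch only: parse_url() output for a typical TCP URL. The
# credentials and host are made up; the types follow URL_QUERY_ARGUMENT_PARSERS.
def _parse_url_example():  # pragma: no cover
    opts = parse_url("redis://user:[email protected]:6380/2?socket_timeout=5")
    assert opts["host"] == "example.com" and opts["port"] == 6380
    assert opts["db"] == 2 and opts["socket_timeout"] == 5.0
    assert opts["username"] == "user" and opts["password"] == "secret"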
_CP = TypeVar("_CP")
class ConnectionPool:
"""
    Create a connection pool. If ``max_connections`` is set, then this
object raises :py:class:`~redis.ConnectionError` when the pool's
limit is reached.
By default, TCP connections are created unless ``connection_class``
is specified. Use :py:class:`~redis.UnixDomainSocketConnection` for
unix sockets.
Any additional keyword arguments are passed to the constructor of
``connection_class``.
"""
@classmethod
def from_url(cls: Type[_CP], url: str, **kwargs) -> _CP:
"""
Return a connection pool configured from the given URL.
For example::
redis://[[username]:[password]]@localhost:6379/0
rediss://[[username]:[password]]@localhost:6379/0
unix://[[username]:[password]]@/path/to/socket.sock?db=0
Three URL schemes are supported:
- `redis://` creates a TCP socket connection. See more at:
<https://www.iana.org/assignments/uri-schemes/prov/redis>
- `rediss://` creates a SSL wrapped TCP socket connection. See more at:
<https://www.iana.org/assignments/uri-schemes/prov/rediss>
- ``unix://``: creates a Unix Domain Socket connection.
The username, password, hostname, path and all querystring values
are passed through urllib.parse.unquote in order to replace any
percent-encoded values with their corresponding characters.
There are several ways to specify a database number. The first value
found will be used:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// or rediss:// schemes, the path argument
of the url, e.g. redis://localhost/0
3. A ``db`` keyword argument to this function.
If none of these options are specified, the default db=0 is used.
All querystring options are cast to their appropriate Python types.
Boolean arguments can be specified with string values "True"/"False"
or "Yes"/"No". Values that cannot be properly cast cause a
``ValueError`` to be raised. Once parsed, the querystring arguments
and keyword arguments are passed to the ``ConnectionPool``'s
class initializer. In the case of conflicting arguments, querystring
arguments always win.
"""
url_options = parse_url(url)
kwargs.update(url_options)
return cls(**kwargs)
def __init__(
self,
connection_class: Type[Connection] = Connection,
max_connections: int = None,
**connection_kwargs,
):
max_connections = max_connections or 2 ** 31
if not isinstance(max_connections, int) or max_connections < 0:
raise ValueError('"max_connections" must be a positive integer')
self.connection_class = connection_class
self.connection_kwargs = connection_kwargs
self.max_connections = max_connections
# a lock to protect the critical section in _checkpid().
# this lock is acquired when the process id changes, such as
# after a fork. during this time, multiple threads in the child
# process could attempt to acquire this lock. the first thread
# to acquire the lock will reset the data structures and lock
# object of this pool. subsequent threads acquiring this lock
# will notice the first thread already did the work and simply
# release the lock.
self._fork_lock = threading.Lock()
self._lock: asyncio.Lock
self._created_connections: int
self._available_connections: List[Connection]
self._in_use_connections: Set[Connection]
self.reset() # lgtm [py/init-calls-subclass]
self.loop = self.connection_kwargs.get("loop")
self.encoder_class = self.connection_kwargs.get("encoder_class", Encoder)
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"<{self.connection_class(**self.connection_kwargs)!r}>"
)
def reset(self):
self._lock = asyncio.Lock()
self._created_connections = 0
self._available_connections = []
self._in_use_connections = set()
# this must be the last operation in this method. while reset() is
# called when holding _fork_lock, other threads in this process
# can call _checkpid() which compares self.pid and os.getpid() without
# holding any lock (for performance reasons). keeping this assignment
# as the last operation ensures that those other threads will also
# notice a pid difference and block waiting for the first thread to
# release _fork_lock. when each of these threads eventually acquire
# _fork_lock, they will notice that another thread already called
# reset() and they will immediately release _fork_lock and continue on.
self.pid = os.getpid()
def _checkpid(self):
# _checkpid() attempts to keep ConnectionPool fork-safe on modern
# systems. this is called by all ConnectionPool methods that
# manipulate the pool's state such as get_connection() and release().
#
# _checkpid() determines whether the process has forked by comparing
# the current process id to the process id saved on the ConnectionPool
# instance. if these values are the same, _checkpid() simply returns.
#
# when the process ids differ, _checkpid() assumes that the process
# has forked and that we're now running in the child process. the child
# process cannot use the parent's file descriptors (e.g., sockets).
# therefore, when _checkpid() sees the process id change, it calls
# reset() in order to reinitialize the child's ConnectionPool. this
# will cause the child to make all new connection objects.
#
# _checkpid() is protected by self._fork_lock to ensure that multiple
# threads in the child process do not call reset() multiple times.
#
# there is an extremely small chance this could fail in the following
# scenario:
# 1. process A calls _checkpid() for the first time and acquires
# self._fork_lock.
# 2. while holding self._fork_lock, process A forks (the fork()
# could happen in a different thread owned by process A)
# 3. process B (the forked child process) inherits the
# ConnectionPool's state from the parent. that state includes
# a locked _fork_lock. process B will not be notified when
# process A releases the _fork_lock and will thus never be
# able to acquire the _fork_lock.
#
# to mitigate this possible deadlock, _checkpid() will only wait 5
# seconds to acquire _fork_lock. if _fork_lock cannot be acquired in
# that time it is assumed that the child is deadlocked and a
# redis.ChildDeadlockedError error is raised.
if self.pid != os.getpid():
acquired = self._fork_lock.acquire(timeout=5)
if not acquired:
raise ChildDeadlockedError
# reset() the instance for the new process if another thread
# hasn't already done so
try:
if self.pid != os.getpid():
self.reset()
finally:
self._fork_lock.release()
async def get_connection(self, command_name, *keys, **options):
"""Get a connection from the pool"""
self._checkpid()
async with self._lock:
try:
connection = self._available_connections.pop()
except IndexError:
connection = self.make_connection()
self._in_use_connections.add(connection)
try:
# ensure this connection is connected to Redis
await connection.connect()
# connections that the pool provides should be ready to send
# a command. if not, the connection was either returned to the
# pool before all data has been read or the socket has been
# closed. either way, reconnect and verify everything is good.
try:
if await connection.can_read():
raise ConnectionError("Connection has data") from None
except ConnectionError:
await connection.disconnect()
await connection.connect()
if await connection.can_read():
raise ConnectionError("Connection not ready") from None
except BaseException:
# release the connection back to the pool so that we don't
# leak it
await self.release(connection)
raise
return connection
def get_encoder(self):
"""Return an encoder based on encoding settings"""
kwargs = self.connection_kwargs
return self.encoder_class(
encoding=kwargs.get("encoding", "utf-8"),
encoding_errors=kwargs.get("encoding_errors", "strict"),
decode_responses=kwargs.get("decode_responses", False),
)
def make_connection(self):
"""Create a new connection"""
if self._created_connections >= self.max_connections:
raise ConnectionError("Too many connections")
self._created_connections += 1
return self.connection_class(**self.connection_kwargs)
async def release(self, connection: Connection):
"""Releases the connection back to the pool"""
self._checkpid()
async with self._lock:
try:
self._in_use_connections.remove(connection)
except KeyError:
# Gracefully fail when a connection is returned to this pool
# that the pool doesn't actually own
pass
if self.owns_connection(connection):
self._available_connections.append(connection)
else:
# pool doesn't own this connection. do not add it back
# to the pool and decrement the count so that another
# connection can take its place if needed
self._created_connections -= 1
await connection.disconnect()
return
def owns_connection(self, connection: Connection):
return connection.pid == self.pid
async def disconnect(self, inuse_connections: bool = True):
"""
Disconnects connections in the pool
If ``inuse_connections`` is True, disconnect connections that are
        currently in use, potentially by other tasks. Otherwise only disconnect
connections that are idle in the pool.
"""
self._checkpid()
async with self._lock:
if inuse_connections:
connections = chain(
self._available_connections, self._in_use_connections
)
else:
connections = self._available_connections
resp = await asyncio.gather(
*(connection.disconnect() for connection in connections),
return_exceptions=True,
)
exc = next((r for r in resp if isinstance(r, BaseException)), None)
if exc:
raise exc
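# Illustrative sketch only: from_url() plus the defaults above. No connection
# is made until get_connection() is awaited.
def _pool_from_url_example():  # pragma: no cover
    pool = ConnectionPool.from_url("redis://localhost:6379/0", max_connections=10)
    assert pool.connection_class is Connection
    assert pool.max_connections == 10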
class BlockingConnectionPool(ConnectionPool):
"""
Thread-safe blocking connection pool::
>>> from aioredis.client import Redis
>>> client = Redis(connection_pool=BlockingConnectionPool())
It performs the same function as the default
:py:class:`~redis.ConnectionPool` implementation, in that,
it maintains a pool of reusable connections that can be shared by
multiple redis clients (safely across threads if required).
The difference is that, in the event that a client tries to get a
    connection from the pool when all of the connections are in use, rather than
raising a :py:class:`~redis.ConnectionError` (as the default
:py:class:`~redis.ConnectionPool` implementation does), it
makes the client wait ("blocks") for a specified number of seconds until
a connection becomes available.
Use ``max_connections`` to increase / decrease the pool size::
>>> pool = BlockingConnectionPool(max_connections=10)
Use ``timeout`` to tell it either how many seconds to wait for a connection
to become available, or to block forever:
>>> # Block forever.
>>> pool = BlockingConnectionPool(timeout=None)
>>> # Raise a ``ConnectionError`` after five seconds if a connection is
>>> # not available.
>>> pool = BlockingConnectionPool(timeout=5)
"""
def __init__(
self,
max_connections: int = 50,
timeout: Optional[int] = 20,
connection_class: Type[Connection] = Connection,
queue_class: Type[asyncio.Queue] = asyncio.LifoQueue,
**connection_kwargs,
):
self.queue_class = queue_class
self.timeout = timeout
self._connections: List[Connection]
super().__init__(
connection_class=connection_class,
max_connections=max_connections,
**connection_kwargs,
)
def reset(self):
# Create and fill up a thread safe queue with ``None`` values.
self.pool = self.queue_class(self.max_connections)
while True:
try:
self.pool.put_nowait(None)
except asyncio.QueueFull:
break
# Keep a list of actual connection instances so that we can
# disconnect them later.
self._connections = []
# this must be the last operation in this method. while reset() is
# called when holding _fork_lock, other threads in this process
# can call _checkpid() which compares self.pid and os.getpid() without
# holding any lock (for performance reasons). keeping this assignment
# as the last operation ensures that those other threads will also
# notice a pid difference and block waiting for the first thread to
# release _fork_lock. when each of these threads eventually acquire
# _fork_lock, they will notice that another thread already called
# reset() and they will immediately release _fork_lock and continue on.
self.pid = os.getpid()
def make_connection(self):
"""Make a fresh connection."""
connection = self.connection_class(**self.connection_kwargs)
self._connections.append(connection)
return connection
async def get_connection(self, command_name, *keys, **options):
"""
Get a connection, blocking for ``self.timeout`` until a connection
is available from the pool.
If the connection returned is ``None`` then creates a new connection.
Because we use a last-in first-out queue, the existing connections
(having been returned to the pool after the initial ``None`` values
were added) will be returned before ``None`` values. This means we only
create new connections when we need to, i.e.: the actual number of
connections will only increase in response to demand.
"""
# Make sure we haven't changed process.
self._checkpid()
# Try and get a connection from the pool. If one isn't available within
# self.timeout then raise a ``ConnectionError``.
connection = None
try:
async with async_timeout.timeout(self.timeout):
connection = await self.pool.get()
except (asyncio.QueueEmpty, asyncio.TimeoutError):
# Note that this is not caught by the redis client and will be
            # raised unless handled by application code. If you want never to
            # block waiting for a connection, specify ``timeout=0``.
raise ConnectionError("No connection available.")
# If the ``connection`` is actually ``None`` then that's a cue to make
# a new connection to add to the pool.
if connection is None:
connection = self.make_connection()
try:
# ensure this connection is connected to Redis
await connection.connect()
# connections that the pool provides should be ready to send
# a command. if not, the connection was either returned to the
# pool before all data has been read or the socket has been
# closed. either way, reconnect and verify everything is good.
try:
if await connection.can_read():
raise ConnectionError("Connection has data") from None
except ConnectionError:
await connection.disconnect()
await connection.connect()
if await connection.can_read():
raise ConnectionError("Connection not ready") from None
except BaseException:
# release the connection back to the pool so that we don't leak it
await self.release(connection)
raise
return connection
async def release(self, connection: Connection):
"""Releases the connection back to the pool."""
# Make sure we haven't changed process.
self._checkpid()
if not self.owns_connection(connection):
# pool doesn't own this connection. do not add it back
# to the pool. instead add a None value which is a placeholder
# that will cause the pool to recreate the connection if
            # it's needed.
await connection.disconnect()
self.pool.put_nowait(None)
return
# Put the connection back into the pool.
try:
self.pool.put_nowait(connection)
except asyncio.QueueFull:
# perhaps the pool has been reset() after a fork? regardless,
# we don't want this connection
pass
async def disconnect(self, inuse_connections: bool = True):
"""Disconnects all connections in the pool."""
self._checkpid()
async with self._lock:
resp = await asyncio.gather(
*(connection.disconnect() for connection in self._connections),
return_exceptions=True,
)
exc = next((r for r in resp if isinstance(r, BaseException)), None)
if exc:
raise exc
| mit | 5,627,629,003,666,229,000 | 36.155958 | 97 | 0.589312 | false | 4.512539 | false | false | false |
burgerdev/volumina | volumina/utility/preferencesManager.py | 1 | 4045 | ###############################################################################
# volumina: volume slicing and editing library
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
import os
import threading
import cPickle as pickle
from volumina.utility import Singleton
class PreferencesManager():
# TODO: Maybe this should be a wrapper API around QSettings (but with pickle strings)
# Pros:
# - Settings would be stored in standard locations for each platform
# Cons:
# - QT dependency (currently there are no non-gui preferences, but maybe someday)
__metaclass__ = Singleton
def get(self, group, setting, default=None):
try:
return self._prefs[group][setting]
except KeyError:
return default
def set(self, group, setting, value):
if group not in self._prefs:
self._prefs[group] = {}
if setting not in self._prefs[group] or self._prefs[group][setting] != value:
self._prefs[group][setting] = value
self._dirty = True
if not self._poolingSave:
self._save()
def __init__(self):
self._filePath = os.path.expanduser('~/.ilastik_preferences')
self._lock = threading.Lock()
self._prefs = self._load()
self._poolingSave = False
self._dirty = False
def _load(self):
with self._lock:
if not os.path.exists(self._filePath):
return {}
else:
try:
with open(self._filePath, 'rb') as f:
return pickle.load(f)
except EOFError:
os.remove(self._filePath)
return {}
def _save(self):
if self._dirty:
with self._lock:
with open(self._filePath, 'wb') as f:
pickle.dump(self._prefs, f)
self._dirty = False
# We support the 'with' keyword, in which case a sequence of settings can be set,
# and the preferences file won't be updated until the __exit__ function is called.
# (Otherwise, each call to set() triggers a new save.)
def __enter__(self):
self._poolingSave = True
return self
def __exit__(self, *args):
self._poolingSave = False
self._save()
if __name__ == "__main__":
prefsMgr = PreferencesManager()
prefsMgr2 = PreferencesManager()
assert id(prefsMgr) == id(prefsMgr2), "It's supposed to be a singleton!"
with PreferencesManager() as prefsMgr:
prefsMgr.set("Group 1", "Setting1", [1,2,3])
prefsMgr.set("Group 1", "Setting2", ['a', 'b', 'c'])
prefsMgr.set("Group 2", "Setting1", "Forty-two")
# Force a new instance
PreferencesManager.instance = None
prefsMgr = PreferencesManager()
assert prefsMgr != prefsMgr2, "For this test, I want a separate instance"
assert prefsMgr.get("Group 1", "Setting1") == [1,2,3]
assert prefsMgr.get("Group 1", "Setting2") == ['a', 'b', 'c']
assert prefsMgr.get("Group 2", "Setting1") == "Forty-two"
| lgpl-3.0 | -6,503,057,252,922,043,000 | 36.453704 | 93 | 0.578245 | false | 4.235602 | false | false | false |
pydanny/dj-stripe | tests/test_source.py | 1 | 3473 | """
dj-stripe Card Model Tests.
"""
import sys
from copy import deepcopy
from unittest.mock import patch
from django.contrib.auth import get_user_model
from django.test import TestCase
from djstripe.models import Source
from . import (
FAKE_CUSTOMER_III,
FAKE_SOURCE,
FAKE_SOURCE_II,
AssertStripeFksMixin,
SourceDict,
default_account,
)
class SourceTest(AssertStripeFksMixin, TestCase):
def setUp(self):
self.account = default_account()
self.user = get_user_model().objects.create_user(
username="testuser", email="[email protected]"
)
self.customer = FAKE_CUSTOMER_III.create_for_user(self.user)
self.customer.sources.all().delete()
self.customer.legacy_cards.all().delete()
def test_attach_objects_hook_without_customer(self):
source = Source.sync_from_stripe_data(deepcopy(FAKE_SOURCE_II))
self.assertEqual(source.customer, None)
self.assert_fks(
source,
expected_blank_fks={
"djstripe.Source.customer",
"djstripe.Customer.default_payment_method",
},
)
def test_sync_source_finds_customer(self):
source = Source.sync_from_stripe_data(deepcopy(FAKE_SOURCE))
self.assertEqual(self.customer, source.customer)
self.assert_fks(
source,
expected_blank_fks={
"djstripe.Customer.coupon",
"djstripe.Customer.default_payment_method",
},
)
def test_str(self):
fake_source = deepcopy(FAKE_SOURCE)
source = Source.sync_from_stripe_data(fake_source)
self.assertEqual("<id={}>".format(fake_source["id"]), str(source))
self.assert_fks(
source,
expected_blank_fks={
"djstripe.Customer.coupon",
"djstripe.Customer.default_payment_method",
},
)
@patch("stripe.Source.retrieve", return_value=deepcopy(FAKE_SOURCE), autospec=True)
def test_detach(self, source_retrieve_mock):
original_detach = SourceDict.detach
def mocked_detach(self):
return original_detach(self)
Source.sync_from_stripe_data(deepcopy(FAKE_SOURCE))
self.assertEqual(0, self.customer.legacy_cards.count())
self.assertEqual(1, self.customer.sources.count())
source = self.customer.sources.first()
with patch(
"tests.SourceDict.detach", side_effect=mocked_detach, autospec=True
) as mock_detach:
source.detach()
self.assertEqual(0, self.customer.sources.count())
# need to refresh_from_db since default_source was cleared with a query
self.customer.refresh_from_db()
self.assertIsNone(self.customer.default_source)
# need to refresh_from_db due to the implementation of Source.detach() -
# see TODO in method
source.refresh_from_db()
self.assertIsNone(source.customer)
self.assertEqual(source.status, "consumed")
if sys.version_info >= (3, 6):
# this mock isn't working on py34, py35, but it's not strictly necessary
# for the test
mock_detach.assert_called()
self.assert_fks(
source,
expected_blank_fks={
"djstripe.Source.customer",
"djstripe.Customer.default_payment_method",
},
)
| bsd-3-clause | 3,751,142,363,640,409,000 | 29.734513 | 87 | 0.610135 | false | 4.029002 | true | false | false |
Mause/pyalp | pyalp/gs_interface/interface.py | 1 | 2747 | from os.path import join, dirname
HERE = dirname(__file__)
import logging
from apps.tournaments.models import Server, GameRequest
from .rpc_client import get_interface
import yaml
def resource_load(name):
    # safe_load avoids executing arbitrary YAML tags; the file is closed promptly
    with open(join(HERE, 'resources', name)) as fobj:
        return yaml.safe_load(fobj)
game_ports = resource_load('game_ports.yaml')
game_names = resource_load('game_names.yaml')
interface = get_interface()
class NonExistantProtocol(Exception):
pass
class CouldNotReachServer(Exception):
pass
def calcqport(port, qgame):
assert qgame, qgame
assert qgame in game_ports, "Game Type not a valid type: {}".format(qgame)
portdiff = game_ports[qgame]
    # interpret the configured value:
    if portdiff[0] == '+':  # a leading '+' or '-' is an offset from the game port
        return port + int(portdiff[1:])
    elif portdiff[0] == '-':
        return port - int(portdiff[1:])
    elif portdiff[0] == '0':  # '0' means the query port equals the game port
        return port
    else:  # anything else is treated as a static query port
        return portdiff
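# Illustrative sketch only: game_ports.yaml is not shown here, so the entry
# below is hypothetical, following the '+offset'/'-offset'/'0' convention.
def _calcqport_example():  # pragma: no cover
    game_ports['fakegame'] = '+1'
    try:
        assert calcqport(27015, 'fakegame') == 27016
    finally:
        del game_ports['fakegame']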
def _query_server(
serv, address, port, protocol,
get_players=False, get_rules=False):
qport = calcqport(port, serv.game.short)
if qport is False: # zero could be returned and eval'd as False
print("Unable to calculate query port for address")
else:
port = qport
    logging.debug("%s ==> %s", port, qport)
logging.debug("querying {}:{} over the {} protocol".format(
address, port, protocol
))
if not interface.protocol_exists(protocol):
raise NonExistantProtocol(protocol)
return interface.query_server(
protocol,
address,
port,
get_players,
get_rules
)
def queryServer(address, port, protocol, get_players=False, get_rules=False):
logging.info('queryServer request for {}:{} for {}'.format(
address, port, protocol
))
result = Server.objects.filter(
ipaddress=address, game__querystr2=protocol
).select_related('game')
if not result:
result = GameRequest.objects.filter(
ipaddress=address, game__querystr2=protocol
)
if not result:
raise CouldNotReachServer(
'{}:{} with protocol {}'.format(address, port, protocol)
)
    serv = result.first()  # _query_server() expects the matched server row first
    return _query_server(
        serv, address, port, protocol,
        get_players, get_rules
    )
def query_server_from_instance(serv, get_players=False, get_rules=False):
return _query_server(
serv,
serv.address,
serv.queryport,
serv.game.engine_type,
get_players,
get_rules
)
def game_title(gamename):
gamename = gamename.lower()
try:
return game_names[gamename]
except KeyError:
return "Game Status"
| mit | -7,299,512,025,326,176,000 | 22.478632 | 78 | 0.626866 | false | 3.768176 | false | false | false |
guziy/basemap | setup.py | 1 | 6013 | from __future__ import (absolute_import, division, print_function)
import glob
import io
import os
import sys
from setuptools.dist import Distribution
if sys.version_info < (2, 6):
raise SystemExit("""matplotlib and the basemap toolkit require Python 2.6 or later.""")
# Do not require numpy for just querying the package
# Taken from the netcdf-python setup file (which took it from h5py setup file).
inc_dirs = []
if any('--' + opt in sys.argv for opt in Distribution.display_option_names +
       ['help-commands', 'help']) or (len(sys.argv) > 1 and sys.argv[1] == 'egg_info'):
from setuptools import setup, Extension
else:
import numpy
# Use numpy versions if they are available.
from numpy.distutils.core import setup, Extension
# append numpy include dir.
inc_dirs.append(numpy.get_include())
def get_install_requirements(path):
path = os.path.join(os.path.dirname(__file__), path)
with io.open(path, encoding='utf-8') as fp:
content = fp.read()
return [req for req in content.split("\n")
if req != '' and not req.startswith('#')]
def checkversion(GEOS_dir):
"""check geos C-API header file (geos_c.h)"""
try:
f = open(os.path.join(GEOS_dir, 'include', 'geos_c.h'))
except IOError:
return None
geos_version = None
for line in f:
if line.startswith('#define GEOS_VERSION'):
geos_version = line.split()[2]
return geos_version
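# Illustrative sketch only: checkversion() keeps the quotes around the version
# token, which is why the comparisons below are written against '"3.1.1"'.
def _checkversion_example():  # pragma: no cover
    line = '#define GEOS_VERSION "3.5.1"'
    assert line.split()[2] == '"3.5.1"'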
# get location of geos lib from environment variable if it is set.
if 'GEOS_DIR' in os.environ:
GEOS_dir = os.environ.get('GEOS_DIR')
else:
# set GEOS_dir manually here if automatic detection fails.
GEOS_dir = None
user_home = os.path.expanduser('~')
geos_search_locations = [user_home, os.path.join(user_home, 'local'),
'/usr', '/usr/local', '/sw', '/opt', '/opt/local']
if GEOS_dir is None:
# if GEOS_dir not set, check a few standard locations.
GEOS_dirs = geos_search_locations
for direc in GEOS_dirs:
geos_version = checkversion(direc)
sys.stdout.write('checking for GEOS lib in %s ....\n' % direc)
if geos_version is None or geos_version < '"3.1.1"':
continue
else:
sys.stdout.write('GEOS lib (version %s) found in %s\n' %\
(geos_version[1:-1],direc))
GEOS_dir = direc
break
else:
geos_version = checkversion(GEOS_dir)
if GEOS_dir is None:
raise SystemExit("""
Can't find geos library in standard locations ('%s').
Please install the corresponding packages using your
systems software management system (e.g. for Debian Linux do:
'apt-get install libgeos-3.3.3 libgeos-c1 libgeos-dev' and/or
set the environment variable GEOS_DIR to point to the location
where geos is installed (for example, if geos_c.h
is in /usr/local/include, and libgeos_c is in /usr/local/lib,
set GEOS_DIR to /usr/local), or edit the setup.py script
manually and set the variable GEOS_dir (right after the line
that says "set GEOS_dir manually here".""" % "', '".join(geos_search_locations))
else:
geos_include_dirs=[os.path.join(GEOS_dir,'include')] + inc_dirs
geos_library_dirs=[os.path.join(GEOS_dir,'lib'),os.path.join(GEOS_dir,'lib64')]
packages = ['mpl_toolkits','mpl_toolkits.basemap']
namespace_packages = ['mpl_toolkits']
package_dirs = {'':'lib'}
# can't install _geoslib in mpl_toolkits.basemap namespace,
# or Basemap objects won't be pickleable.
# don't use runtime_library_dirs on windows (workaround
# for a distutils bug - http://bugs.python.org/issue2437).
if sys.platform == 'win32':
runtime_lib_dirs = []
else:
runtime_lib_dirs = geos_library_dirs
extensions = [ Extension("_geoslib",['src/_geoslib.c'],
library_dirs=geos_library_dirs,
runtime_library_dirs=runtime_lib_dirs,
include_dirs=geos_include_dirs,
libraries=['geos_c']) ]
# Specify all the required mpl data
pathout = os.path.join('lib', 'mpl_toolkits', 'basemap', 'data')
datafiles = glob.glob(os.path.join(pathout,'*'))
datafiles = [os.path.join('data',os.path.basename(f)) for f in datafiles]
package_data = {'mpl_toolkits.basemap':datafiles}
install_requires = get_install_requirements("requirements.txt")
__version__ = "1.2.1"
setup(
name = "basemap",
version = __version__,
description = "Plot data on map projections with matplotlib",
long_description = """
An add-on toolkit for matplotlib that lets you plot data
on map projections with coastlines, lakes, rivers and political boundaries.
See http://matplotlib.org/basemap/users/examples.html for
examples of what it can do.""",
url = "https://matplotlib.org/basemap/",
download_url = "https://github.com/matplotlib/basemap/archive/v{0}rel.tar.gz".format(__version__),
author = "Jeff Whitaker",
author_email = "[email protected]",
maintainer = "Ben Root",
maintainer_email = "[email protected]",
install_requires = install_requires,
platforms = ["any"],
license = "OSI Approved",
keywords = ["python","plotting","plots","graphs","charts","GIS","mapping","map projections","maps"],
classifiers = ["Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Software Development :: Libraries :: Python Modules",
"Operating System :: OS Independent"],
packages = packages,
namespace_packages = namespace_packages,
package_dir = package_dirs,
ext_modules = extensions,
package_data = package_data
)
| gpl-2.0 | 8,225,000,807,982,011,000 | 38.559211 | 111 | 0.634791 | false | 3.653098 | false | false | false |
alexandrul-ci/robotframework | src/robot/utils/normalizing.py | 1 | 3987 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from collections import MutableMapping
from .platform import PY3
from .robottypes import is_dict_like
def normalize(string, ignore=(), caseless=True, spaceless=True):
"""Normalizes given string according to given spec.
By default string is turned to lower case and all whitespace is removed.
Additional characters can be removed by giving them in ``ignore`` list.
"""
empty = type(string)()
if PY3 and isinstance(ignore, bytes):
# Iterating bytes in Python3 yields integers.
ignore = [bytes([i]) for i in ignore]
if spaceless:
string = empty.join(string.split())
if caseless:
string = lower(string)
ignore = [lower(i) for i in ignore]
for ign in ignore:
if ign in string: # performance optimization
string = string.replace(ign, empty)
return string
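# Illustrative examples (added sketch, not part of the original module):
#     normalize('Hello World') # -> 'helloworld'
#     normalize('Hello World', caseless=False) # -> 'HelloWorld'
#     normalize('key_name', ignore=['_']) # -> 'keyname'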
# http://ironpython.codeplex.com/workitem/33133
if sys.platform == 'cli' and sys.version_info < (2, 7, 5):
def lower(string):
return ('A' + string).lower()[1:]
else:
def lower(string):
return string.lower()
class NormalizedDict(MutableMapping):
"""Custom dictionary implementation automatically normalizing keys."""
def __init__(self, initial=None, ignore=(), caseless=True, spaceless=True):
"""Initialized with possible initial value and normalizing spec.
Initial values can be either a dictionary or an iterable of name/value
pairs. In the latter case items are added in the given order.
Normalizing spec has exact same semantics as with the :func:`normalize`
function.
"""
self._data = {}
self._keys = {}
self._normalize = lambda s: normalize(s, ignore, caseless, spaceless)
if initial:
self._add_initial(initial)
def _add_initial(self, initial):
items = initial.items() if hasattr(initial, 'items') else initial
for key, value in items:
self[key] = value
def __getitem__(self, key):
return self._data[self._normalize(key)]
def __setitem__(self, key, value):
norm_key = self._normalize(key)
self._data[norm_key] = value
self._keys.setdefault(norm_key, key)
def __delitem__(self, key):
norm_key = self._normalize(key)
del self._data[norm_key]
del self._keys[norm_key]
def __iter__(self):
return (self._keys[norm_key] for norm_key in sorted(self._keys))
def __len__(self):
return len(self._data)
def __str__(self):
return '{%s}' % ', '.join('%r: %r' % (key, self[key]) for key in self)
def __eq__(self, other):
if not is_dict_like(other):
return False
if not isinstance(other, NormalizedDict):
other = NormalizedDict(other)
return self._data == other._data
def __ne__(self, other):
return not self == other
def copy(self):
copy = NormalizedDict()
copy._data = self._data.copy()
copy._keys = self._keys.copy()
copy._normalize = self._normalize
return copy
# Speed-ups. Following methods are faster than default implementations.
def __contains__(self, key):
return self._normalize(key) in self._data
def clear(self):
self._data.clear()
self._keys.clear()
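# Illustrative usage (added sketch, not part of the original module): keys
# match regardless of case and whitespace, while the first spelling is kept:
#     d = NormalizedDict()
#     d['Log Level'] = 'DEBUG'
#     d['loglevel'] # -> 'DEBUG'
#     'LOG LEVEL' in d # -> True
#     list(d) # -> ['Log Level']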
| apache-2.0 | -7,067,595,486,309,116,000 | 31.680328 | 79 | 0.635566 | false | 4.035425 | false | false | false |
joshishungry/artificial_intel | assignments/lab4/map_coloring_csp.py | 1 | 2656 | #!/usr/bin/env python
"""
Implementation of the Map coloring problem from 2006 Quiz 2
"""
import sys
from csp import CSP, Variable, BinaryConstraint, solve_csp_problem, \
basic_constraint_checker
def map_coloring_csp_problem():
constraints = []
variables = []
# order of the variables here is the order given in the problem
variables.append(Variable("MA", ["B"]))
variables.append(Variable("TX", ["R"]))
variables.append(Variable("NE", ["R", "B", "Y"]))
variables.append(Variable("OV", ["R", "B", "Y"]))
variables.append(Variable("SE", ["R", "B", "Y"]))
variables.append(Variable("GL", ["R", "B", "Y"]))
variables.append(Variable("MID",["R", "B", "Y"]))
variables.append(Variable("MW", ["R", "B", "Y"]))
variables.append(Variable("SO", ["R", "B"]))
variables.append(Variable("NY", ["R", "B"]))
variables.append(Variable("FL", ["R", "B"]))
# these are all variable pairings of adjacent regions
edges = [("NE", "NY"),
("NE", "MA"),
("MA", "NY"),
("GL", "NY"),
("GL", "OV"),
("MID", "NY"),
("OV", "NY"),
("OV", "MID"),
("MW", "OV"),
("MW", "TX"),
("TX", "SO"),
("SO", "OV"),
("SO", "FL"),
("FL", "SE"),
("SE", "MID"),
("SE", "SO")]
# duplicate the edges the other way.
all_edges = []
for edge in edges:
all_edges.append((edge[0], edge[1]))
all_edges.append((edge[1], edge[0]))
forbidden = [("R", "B"), ("B", "R"), ("Y", "Y")]
# pairs of colours that are not allowed on adjacent regions:
def forbidden_edge(val_a, val_b, name_a, name_b):
if (val_a, val_b) in forbidden or (val_b, val_a) in forbidden:
return False
return True
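# Illustrative checks (added sketch, not part of the original assignment):
#     forbidden_edge('R', 'R', 'NE', 'NY') # -> True (allowed pairing)
#     forbidden_edge('R', 'B', 'NE', 'NY') # -> False ((R, B) is forbidden)
#     forbidden_edge('Y', 'Y', 'GL', 'OV') # -> False ((Y, Y) is forbidden)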
for pair in all_edges:
constraints.append(
BinaryConstraint(pair[0], pair[1],
forbidden_edge,
"R-B, B-R, Y-Y edges are not allowed"))
return CSP(constraints, variables)
if __name__ == "__main__":
if len(sys.argv) > 1:
checker_type = sys.argv[1]
else:
checker_type = "dfs"
if checker_type == "dfs":
checker = basic_constraint_checker
elif checker_type == "fc":
import lab4
checker = lab4.forward_checking
elif checker_type == "fcps":
import lab4
checker = lab4.forward_checking_prop_singleton
else:
import lab4
checker = lab4.forward_checking_prop_singleton
solve_csp_problem(map_coloring_csp_problem, checker, verbose=True)
| apache-2.0 | -4,624,747,162,642,793,000 | 31 | 70 | 0.508283 | false | 3.613605 | false | false | false |
ianrenton/playbulb-tools | weathercheck/weathercheck.py | 1 | 1370 | # Python script to set a Playbulb LED colour based on the current weather.
# Run me as a cron job for ambient weather information!
# by Ian Renton
# https://github.com/ianrenton/playbulb-tools
# Uses python OpenWeatherMap wrapper from https://github.com/csparpa/pyowm
import pyowm, re, subprocess
#### Config ####
# Your location
LOCATION = 'London'
# Your Playbulb address (obtained with 'sudo hcitool lescan')
PLAYBULB_ADDRESS = '01:23:45:67:89:10'
# Weather to colour dict
COLOUR_MAP = { 'clear': 'FFFF6000',
'clouds': '80000000',
'rain': '000000FF',
'drizzle': '0000FFFF',
'snow': 'FFFFFFFF',
'thunderstorm': '80FF0000'}
#### Code below ####
# Show the name of the playbulb
proc = subprocess.Popen(('gatttool -b ' + PLAYBULB_ADDRESS + ' --char-read -a 0x0003').split(), stdout = subprocess.PIPE)
for line in iter(proc.stdout.readline,''):
name = ''.join(x.strip() for x in re.findall(r'[0-9a-f]{2}\s', line)).decode("hex")
print 'Playbulb name: ' + name
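# Parsing sketch (added comment, not part of the original script): gatttool
# prints the value as space-separated hex bytes, which the regex collects and
# the join/decode turns back into ASCII, e.g. (Python 2):
#     raw = '4d 79 42 75 6c 62 '
#     ''.join(x.strip() for x in re.findall(r'[0-9a-f]{2}\s', raw)).decode("hex")
#     # -> 'MyBulb'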
# Get weather forecast
weather = pyowm.OWM().weather_at_place(LOCATION).get_weather().get_status()
colour = COLOUR_MAP[weather]
print 'Weather for ' + LOCATION + ': ' + weather + ', colour ' + colour
# Set Playbulb colour
subprocess.call(('gatttool -b ' + PLAYBULB_ADDRESS + ' --char-write -a 0x0016 -n ' + colour).split())
| bsd-3-clause | 7,456,926,169,413,411,000 | 34.128205 | 121 | 0.651095 | false | 3.051225 | false | false | false |
gogoair/foremast | src/foremast/securitygroup/create_securitygroup.py | 1 | 11172 | # Foremast - Pipeline Tooling
#
# Copyright 2018 Gogo, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create Security Groups for Spinnaker Pipelines.
Security Group port specifications will be sourced from the `application.json`
files for each environment.
Examples:
application-master.json::
{
"security_group": {
"description": "Security Group description",
"ingress": {
"eureka": [
{"start_port": 80, "end_port": 8080, "protocol": "tcp"}
],
"coreforrest": [
8080,
8443
],
"0.0.0.0/0": [
8080
]
}
}
}
"""
import ipaddress
import logging
from contextlib import suppress
import boto3
from boto3.exceptions import botocore
from deepmerge import conservative_merger
from ..consts import DEFAULT_SECURITYGROUP_RULES
from ..exceptions import (ForemastConfigurationFileError, SpinnakerSecurityGroupCreationFailed,
SpinnakerSecurityGroupError)
from ..utils import get_details, get_properties, get_security_group_id, get_template, get_vpc_id, wait_for_task
class SpinnakerSecurityGroup:
"""Manipulate Spinnaker Security Groups.
Args:
app (str): Application name.
env (str): Deployment environment.
prop_path (str): Path to the raw.properties.json.
region (str): AWS Region.
"""
def __init__(self, app=None, env=None, region=None, prop_path=None):
self.log = logging.getLogger(__name__)
self.app_name = app
self.env = env
self.region = region
self.properties = get_properties(properties_file=prop_path, env=self.env, region=self.region)
self.generated = get_details(app=self.app_name)
self.group = self.generated.data['project']
def _validate_cidr(self, rule):
"""Validate the cidr block in a rule.
Returns:
True: Upon successful completion.
Raises:
SpinnakerSecurityGroupCreationFailed: CIDR definition is invalid or
the network range is too wide.
"""
try:
network = ipaddress.IPv4Network(rule['app'])
except (ipaddress.NetmaskValueError, ValueError) as error:
raise SpinnakerSecurityGroupCreationFailed(error)
self.log.debug('Validating CIDR: %s', network.exploded)
return True
def _process_rules(self, rules):
"""Process rules into cidr and non-cidr lists.
Args:
rules (list): Allowed Security Group ports and protocols.
Returns:
(list, list): Security Group reference rules and custom CIDR rules.
"""
cidr = []
non_cidr = []
for rule in rules:
if '.' in rule['app']:
self.log.debug('Custom CIDR rule: %s', rule)
self._validate_cidr(rule)
cidr.append(rule)
else:
self.log.debug('SG reference rule: %s', rule)
non_cidr.append(rule)
self.log.debug('Custom CIDR rules: %s', cidr)
self.log.debug('SG reference rules: %s', non_cidr)
return non_cidr, cidr
def add_tags(self):
"""Add tags to security group.
Returns:
True: Upon successful completion.
"""
session = boto3.session.Session(profile_name=self.env, region_name=self.region)
resource = session.resource('ec2')
group_id = get_security_group_id(self.app_name, self.env, self.region)
security_group = resource.SecurityGroup(group_id)
try:
tag = security_group.create_tags(
DryRun=False,
Tags=[{
'Key': 'app_group',
'Value': self.group
}, {
'Key': 'app_name',
'Value': self.app_name
}])
self.log.debug('Security group has been tagged: %s', tag)
except botocore.exceptions.ClientError as error:
self.log.warning(error)
return True
def add_cidr_rules(self, rules):
"""Add cidr rules to security group via boto.
Args:
rules (list): Allowed Security Group ports and protocols.
Returns:
True: Upon successful completion.
Raises:
SpinnakerSecurityGroupError: boto3 call failed to add CIDR block to
Security Group.
"""
session = boto3.session.Session(profile_name=self.env, region_name=self.region)
client = session.client('ec2')
group_id = get_security_group_id(self.app_name, self.env, self.region)
for rule in rules:
data = {
'DryRun':
False,
'GroupId':
group_id,
'IpPermissions': [{
'IpProtocol': rule['protocol'],
'FromPort': rule['start_port'],
'ToPort': rule['end_port'],
'IpRanges': [{
'CidrIp': rule['app']
}]
}]
}
self.log.debug('Security Group rule: %s', data)
try:
client.authorize_security_group_ingress(**data)
except botocore.exceptions.ClientError as error:
if 'InvalidPermission.Duplicate' in str(error):
self.log.debug('Duplicate rule exist, that is OK.')
else:
msg = 'Unable to add cidr rules to {}'.format(rule.get('app'))
self.log.error(msg)
raise SpinnakerSecurityGroupError(msg)
return True
def resolve_self_references(self, rules):
"""Resolves `$self` references to actual application name in security group rules."""
with suppress(KeyError):
rule = rules.pop('$self')
rules[self.app_name] = rule
return rules
def update_default_rules(self):
"""Concatinate application and global security group rules."""
app_ingress = self.properties['security_group']['ingress']
ingress = conservative_merger.merge(DEFAULT_SECURITYGROUP_RULES, app_ingress)
resolved_ingress = self.resolve_self_references(ingress)
self.log.info('Updated default rules:\n%s', ingress)
return resolved_ingress
def _create_security_group(self, ingress):
"""Send a POST to spinnaker to create a new security group.
Returns:
boolean: True if created successfully
"""
template_kwargs = {
'app': self.app_name,
'env': self.env,
'region': self.region,
'vpc': get_vpc_id(self.env, self.region),
'description': self.properties['security_group']['description'],
'ingress': ingress,
}
secgroup_json = get_template(
template_file='infrastructure/securitygroup_data.json.j2', formats=self.generated, **template_kwargs)
wait_for_task(secgroup_json)
return True
def create_security_group(self): # noqa
"""Send a POST to spinnaker to create or update a security group.
Returns:
boolean: True if created successfully
Raises:
ForemastConfigurationFileError: Missing environment configuration or
misconfigured Security Group definition.
"""
ingress_rules = []
try:
security_id = get_security_group_id(name=self.app_name, env=self.env, region=self.region)
except (SpinnakerSecurityGroupError, AssertionError):
self._create_security_group(ingress_rules)
else:
self.log.debug('Security Group ID %s found for %s.', security_id, self.app_name)
try:
ingress = self.update_default_rules()
except KeyError:
msg = 'Possible missing configuration for "{0}".'.format(self.env)
self.log.error(msg)
raise ForemastConfigurationFileError(msg)
for app in ingress:
rules = ingress[app]
# Essentially we have two formats: simple, advanced
# - simple: just a list of ports
# - advanced: specifies port ranges and protocols
for rule in rules:
ingress_rule = self.create_ingress_rule(app, rule)
ingress_rules.append(ingress_rule)
ingress_rules_no_cidr, ingress_rules_cidr = self._process_rules(ingress_rules)
self._create_security_group(ingress_rules_no_cidr)
# Append cidr rules
self.add_cidr_rules(ingress_rules_cidr)
# Tag security group
self.add_tags()
self.log.info('Successfully created %s security group', self.app_name)
return True
def create_ingress_rule(self, app, rule):
"""Create a normalized ingress rule.
Args:
app (str): Application name
rule (dict or int): Allowed Security Group ports and protocols.
Returns:
dict: Contains app, start_port, end_port, protocol, cross_account_env and cross_account_vpc_id
"""
if isinstance(rule, dict):
# Advanced
start_port = rule.get('start_port')
end_port = rule.get('end_port')
protocol = rule.get('protocol', 'tcp')
requested_cross_account = rule.get('env', self.env)
if self.env == requested_cross_account:
# We are trying to use cross-account security group settings within the same account
# We should not allow this.
cross_account_env = None
cross_account_vpc_id = None
else:
cross_account_env = requested_cross_account
cross_account_vpc_id = get_vpc_id(cross_account_env, self.region)
else:
start_port = rule
end_port = rule
protocol = 'tcp'
cross_account_env = None
cross_account_vpc_id = None
created_rule = {
'app': app,
'start_port': start_port,
'end_port': end_port,
'protocol': protocol,
'cross_account_env': cross_account_env,
'cross_account_vpc_id': cross_account_vpc_id
}
self.log.debug('Normalized ingress rule: %s', created_rule)
return created_rule
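# Illustrative mapping (added sketch, not part of the original module): for a
# SpinnakerSecurityGroup instance sg, the two application.json formats
# normalize as follows:
#     sg.create_ingress_rule('eureka', 8080)
#     # -> {'app': 'eureka', 'start_port': 8080, 'end_port': 8080,
#     #     'protocol': 'tcp', 'cross_account_env': None,
#     #     'cross_account_vpc_id': None}
#     sg.create_ingress_rule('eureka', {'start_port': 80, 'end_port': 8080,
#                                       'protocol': 'tcp'})
#     # -> keeps the given range and protocol, again with no cross-account
#     #    fields when no 'env' is specified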
| apache-2.0 | -642,778,247,066,717,200 | 33.481481 | 113 | 0.568654 | false | 4.372603 | true | false | false |
ReactiveX/RxPY | rx/core/operators/merge.py | 1 | 4401 | from typing import Callable, Optional
import rx
from rx import from_future
from rx.core import Observable
from rx.disposable import CompositeDisposable, SingleAssignmentDisposable
from rx.internal.concurrency import synchronized
from rx.internal.utils import is_future
def _merge(*sources: Observable,
max_concurrent: Optional[int] = None
) -> Callable[[Observable], Observable]:
def merge(source: Observable) -> Observable:
"""Merges an observable sequence of observable sequences into
an observable sequence, limiting the number of concurrent
subscriptions to inner sequences. Or merges two observable
sequences into a single observable sequence.
Examples:
>>> res = merge(sources)
Args:
source: Source observable.
Returns:
The observable sequence that merges the elements of the
inner sequences.
"""
if max_concurrent is None:
sources_ = tuple([source]) + sources
return rx.merge(*sources_)
def subscribe(observer, scheduler=None):
active_count = [0]
group = CompositeDisposable()
is_stopped = [False]
queue = []
def subscribe(xs):
subscription = SingleAssignmentDisposable()
group.add(subscription)
@synchronized(source.lock)
def on_completed():
group.remove(subscription)
if queue:
s = queue.pop(0)
subscribe(s)
else:
active_count[0] -= 1
if is_stopped[0] and active_count[0] == 0:
observer.on_completed()
on_next = synchronized(source.lock)(observer.on_next)
on_error = synchronized(source.lock)(observer.on_error)
subscription.disposable = xs.subscribe_(on_next, on_error, on_completed, scheduler)
def on_next(inner_source):
if active_count[0] < max_concurrent:
active_count[0] += 1
subscribe(inner_source)
else:
queue.append(inner_source)
def on_completed():
is_stopped[0] = True
if active_count[0] == 0:
observer.on_completed()
group.add(source.subscribe_(on_next, observer.on_error, on_completed, scheduler))
return group
return Observable(subscribe)
return merge
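# Illustrative usage (added sketch, not part of the original module): limiting
# the number of concurrently subscribed inner sequences:
#     import rx
#     from rx import operators as ops
#     inners = rx.of(rx.of(1, 2), rx.of(3, 4), rx.of(5, 6))
#     inners.pipe(ops.merge(max_concurrent=1)).subscribe(print)
#     # should print 1 2 3 4 5 6, draining one inner sequence at a time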
def _merge_all() -> Callable[[Observable], Observable]:
def merge_all(source: Observable) -> Observable:
"""Partially applied merge_all operator.
Merges an observable sequence of observable sequences into an
observable sequence.
Args:
source: Source observable to merge.
Returns:
The observable sequence that merges the elements of the inner
sequences.
"""
def subscribe(observer, scheduler=None):
group = CompositeDisposable()
is_stopped = [False]
m = SingleAssignmentDisposable()
group.add(m)
def on_next(inner_source):
inner_subscription = SingleAssignmentDisposable()
group.add(inner_subscription)
inner_source = from_future(inner_source) if is_future(inner_source) else inner_source
@synchronized(source.lock)
def on_completed():
group.remove(inner_subscription)
if is_stopped[0] and len(group) == 1:
observer.on_completed()
on_next = synchronized(source.lock)(observer.on_next)
on_error = synchronized(source.lock)(observer.on_error)
subscription = inner_source.subscribe_(on_next, on_error, on_completed, scheduler)
inner_subscription.disposable = subscription
def on_completed():
is_stopped[0] = True
if len(group) == 1:
observer.on_completed()
m.disposable = source.subscribe_(on_next, observer.on_error, on_completed, scheduler)
return group
return Observable(subscribe)
return merge_all
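# Illustrative usage (added sketch, not part of the original module):
#     rx.of(rx.of(1, 2), rx.of(3, 4)).pipe(ops.merge_all()).subscribe(print)
#     # should print 1 2 3 4, flattening the outer sequence as inners arrive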
| mit | 6,199,957,247,772,746,000 | 34.208 | 101 | 0.561918 | false | 4.757838 | false | false | false |
ioam/paramtk | paramtk/odict.py | 1 | 46083 | from __future__ import generators
# odict.py
# An Ordered Dictionary object
# Copyright (C) 2005 Nicola Larosa, Michael Foord
# E-mail: nico AT tekNico DOT net, fuzzyman AT voidspace DOT org DOT uk
# This software is licensed under the terms of the BSD license.
# http://www.voidspace.org.uk/python/license.shtml
# Basically you're free to copy, modify, distribute and relicense it,
# So long as you keep a copy of the license with it.
# Documentation at http://www.voidspace.org.uk/python/odict.html
# For information about bugfixes, updates and support, please join the
# Pythonutils mailing list:
# http://groups.google.com/group/pythonutils/
# Comments, suggestions and bug reports welcome.
"""A dict that keeps keys in insertion order"""
__author__ = ('Nicola Larosa <[email protected]>,'
'Michael Foord <fuzzyman AT voidspace DOT org DOT uk>')
__docformat__ = "restructuredtext en"
__revision__ = '$Id: external.py 12024 2012-05-02 21:13:18Z ceball $'
__version__ = '0.2.2'
__all__ = ['OrderedDict', 'SequenceOrderedDict']
import sys
INTP_VER = sys.version_info[:2]
if INTP_VER < (2, 2):
raise RuntimeError("Python v.2.2 or later required")
import types, warnings
class OrderedDict(dict):
"""
A class of dictionary that keeps the insertion order of keys.
All appropriate methods return keys, items, or values in an ordered way.
All normal dictionary methods are available. Update and comparison are
restricted to other OrderedDict objects.
Various sequence methods are available, including the ability to explicitly
mutate the key ordering.
__contains__ tests:
>>> d = OrderedDict(((1, 3),))
>>> 1 in d
1
>>> 4 in d
0
__getitem__ tests:
>>> OrderedDict(((1, 3), (3, 2), (2, 1)))[2]
1
>>> OrderedDict(((1, 3), (3, 2), (2, 1)))[4]
Traceback (most recent call last):
KeyError: 4
__len__ tests:
>>> len(OrderedDict())
0
>>> len(OrderedDict(((1, 3), (3, 2), (2, 1))))
3
get tests:
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.get(1)
3
>>> d.get(4) is None
1
>>> d.get(4, 5)
5
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1)])
has_key tests:
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.has_key(1)
1
>>> d.has_key(4)
0
"""
def __init__(self, init_val=(), strict=False):
"""
Create a new ordered dictionary. Cannot init from a normal dict,
nor from kwargs, since items order is undefined in those cases.
If the ``strict`` keyword argument is ``True`` (``False`` is the
default) then, when doing slice assignment, the ``OrderedDict`` you are
assigning from *must not* contain any keys in the remaining dict.
>>> OrderedDict()
OrderedDict([])
>>> OrderedDict({1: 1})
Traceback (most recent call last):
TypeError: undefined order, cannot get items from dict
>>> OrderedDict({1: 1}.items())
OrderedDict([(1, 1)])
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1)])
>>> OrderedDict(d)
OrderedDict([(1, 3), (3, 2), (2, 1)])
"""
self.strict = strict
dict.__init__(self)
if isinstance(init_val, OrderedDict):
self._sequence = init_val.keys()
dict.update(self, init_val)
elif isinstance(init_val, dict):
# we lose compatibility with other ordered dict types this way
raise TypeError('undefined order, cannot get items from dict')
else:
self._sequence = []
self.update(init_val)
### Special methods ###
def __delitem__(self, key):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> del d[3]
>>> d
OrderedDict([(1, 3), (2, 1)])
>>> del d[3]
Traceback (most recent call last):
KeyError: 3
>>> d[3] = 2
>>> d
OrderedDict([(1, 3), (2, 1), (3, 2)])
>>> del d[0:1]
>>> d
OrderedDict([(2, 1), (3, 2)])
"""
if isinstance(key, types.SliceType):
# FIXME: efficiency?
keys = self._sequence[key]
for entry in keys:
dict.__delitem__(self, entry)
del self._sequence[key]
else:
# do the dict.__delitem__ *first* as it raises
# the more appropriate error
dict.__delitem__(self, key)
self._sequence.remove(key)
def __eq__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d == OrderedDict(d)
True
>>> d == OrderedDict(((1, 3), (2, 1), (3, 2)))
False
>>> d == OrderedDict(((1, 0), (3, 2), (2, 1)))
False
>>> d == OrderedDict(((0, 3), (3, 2), (2, 1)))
False
>>> d == dict(d)
False
>>> d == False
False
"""
if isinstance(other, OrderedDict):
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() == other.items())
else:
return False
def __lt__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> c < d
True
>>> d < c
False
>>> d < dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() < other.items())
def __le__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> e = OrderedDict(d)
>>> c <= d
True
>>> d <= c
False
>>> d <= dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
>>> d <= e
True
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() <= other.items())
def __ne__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d != OrderedDict(d)
False
>>> d != OrderedDict(((1, 3), (2, 1), (3, 2)))
True
>>> d != OrderedDict(((1, 0), (3, 2), (2, 1)))
True
>>> d == OrderedDict(((0, 3), (3, 2), (2, 1)))
False
>>> d != dict(d)
True
>>> d != False
True
"""
if isinstance(other, OrderedDict):
# FIXME: efficiency?
# Generate both item lists for each compare
return not (self.items() == other.items())
else:
return True
def __gt__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> d > c
True
>>> c > d
False
>>> d > dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() > other.items())
def __ge__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> e = OrderedDict(d)
>>> c >= d
False
>>> d >= c
True
>>> d >= dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
>>> e >= d
True
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() >= other.items())
def __repr__(self):
"""
Used for __repr__ and __str__
>>> r1 = repr(OrderedDict((('a', 'b'), ('c', 'd'), ('e', 'f'))))
>>> r1
"OrderedDict([('a', 'b'), ('c', 'd'), ('e', 'f')])"
>>> r2 = repr(OrderedDict((('a', 'b'), ('e', 'f'), ('c', 'd'))))
>>> r2
"OrderedDict([('a', 'b'), ('e', 'f'), ('c', 'd')])"
>>> r1 == str(OrderedDict((('a', 'b'), ('c', 'd'), ('e', 'f'))))
True
>>> r2 == str(OrderedDict((('a', 'b'), ('e', 'f'), ('c', 'd'))))
True
"""
return '%s([%s])' % (self.__class__.__name__, ', '.join(
['(%r, %r)' % (key, self[key]) for key in self._sequence]))
def __setitem__(self, key, val):
"""
Allows slice assignment, so long as the slice is an OrderedDict
>>> d = OrderedDict()
>>> d['a'] = 'b'
>>> d['b'] = 'a'
>>> d[3] = 12
>>> d
OrderedDict([('a', 'b'), ('b', 'a'), (3, 12)])
>>> d[:] = OrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
OrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d[::2] = OrderedDict(((7, 8), (9, 10)))
>>> d
OrderedDict([(7, 8), (2, 3), (9, 10)])
>>> d = OrderedDict(((0, 1), (1, 2), (2, 3), (3, 4)))
>>> d[1:3] = OrderedDict(((1, 2), (5, 6), (7, 8)))
>>> d
OrderedDict([(0, 1), (1, 2), (5, 6), (7, 8), (3, 4)])
>>> d = OrderedDict(((0, 1), (1, 2), (2, 3), (3, 4)), strict=True)
>>> d[1:3] = OrderedDict(((1, 2), (5, 6), (7, 8)))
>>> d
OrderedDict([(0, 1), (1, 2), (5, 6), (7, 8), (3, 4)])
>>> a = OrderedDict(((0, 1), (1, 2), (2, 3)), strict=True)
>>> a[3] = 4
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[::1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[:2] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)])
Traceback (most recent call last):
ValueError: slice assignment must be from unique keys
>>> a = OrderedDict(((0, 1), (1, 2), (2, 3)))
>>> a[3] = 4
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[::1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[:2] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[::-1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(3, 4), (2, 3), (1, 2), (0, 1)])
>>> d = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> d[:1] = 3
Traceback (most recent call last):
TypeError: slice assignment requires an OrderedDict
>>> d = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> d[:1] = OrderedDict([(9, 8)])
>>> d
OrderedDict([(9, 8), (1, 2), (2, 3), (3, 4)])
"""
if isinstance(key, types.SliceType):
if not isinstance(val, OrderedDict):
# FIXME: allow a list of tuples?
raise TypeError('slice assignment requires an OrderedDict')
keys = self._sequence[key]
# NOTE: Could use ``range(*key.indices(len(self._sequence)))``
indexes = range(len(self._sequence))[key]
if key.step is None:
# NOTE: new slice may not be the same size as the one being
# overwritten !
# NOTE: What is the algorithm for an impossible slice?
# e.g. d[5:3]
pos = key.start or 0
del self[key]
newkeys = val.keys()
for k in newkeys:
if k in self:
if self.strict:
raise ValueError('slice assignment must be from '
'unique keys')
else:
# NOTE: This removes duplicate keys *first*
# so start position might have changed?
del self[k]
self._sequence = (self._sequence[:pos] + newkeys +
self._sequence[pos:])
dict.update(self, val)
else:
# extended slice - length of new slice must be the same
# as the one being replaced
if len(keys) != len(val):
raise ValueError('attempt to assign sequence of size %s '
'to extended slice of size %s' % (len(val), len(keys)))
# FIXME: efficiency?
del self[key]
item_list = zip(indexes, val.items())
# smallest indexes first - higher indexes not guaranteed to
# exist
item_list.sort()
for pos, (newkey, newval) in item_list:
if self.strict and newkey in self:
raise ValueError('slice assignment must be from unique'
' keys')
self.insert(pos, newkey, newval)
else:
if key not in self:
self._sequence.append(key)
dict.__setitem__(self, key, val)
def __getitem__(self, key):
"""
Allows slicing. Returns an OrderedDict if you slice.
>>> b = OrderedDict([(7, 0), (6, 1), (5, 2), (4, 3), (3, 4), (2, 5), (1, 6)])
>>> b[::-1]
OrderedDict([(1, 6), (2, 5), (3, 4), (4, 3), (5, 2), (6, 1), (7, 0)])
>>> b[2:5]
OrderedDict([(5, 2), (4, 3), (3, 4)])
"""
if isinstance(key, types.SliceType):
# FIXME: does this raise the error we want?
keys = self._sequence[key]
# FIXME: efficiency?
return OrderedDict([(entry, self[entry]) for entry in keys])
else:
return dict.__getitem__(self, key)
__str__ = __repr__
def __setattr__(self, name, value):
"""
Implemented so that accesses to ``sequence`` raise a warning and are
diverted to the new ``setkeys`` method.
"""
if name == 'sequence':
warnings.warn('Use of the sequence attribute is deprecated.'
' Use the keys method instead.', DeprecationWarning)
# NOTE: doesn't return anything
self.setkeys(value)
else:
# FIXME: do we want to allow arbitrary setting of attributes?
# Or do we want to manage it?
object.__setattr__(self, name, value)
def __getattr__(self, name):
"""
Implemented so that access to ``sequence`` raises a warning.
>>> d = OrderedDict()
>>> d.sequence
[]
"""
if name == 'sequence':
warnings.warn('Use of the sequence attribute is deprecated.'
' Use the keys method instead.', DeprecationWarning)
# NOTE: Still (currently) returns a direct reference. Need to
# because code that uses sequence will expect to be able to
# mutate it in place.
return self._sequence
else:
# raise the appropriate error
raise AttributeError("OrderedDict has no '%s' attribute" % name)
def __deepcopy__(self, memo):
"""
To allow deepcopy to work with OrderedDict.
>>> from copy import deepcopy
>>> a = OrderedDict([(1, 1), (2, 2), (3, 3)])
>>> a['test'] = {}
>>> b = deepcopy(a)
>>> b == a
True
>>> b is a
False
>>> a['test'] is b['test']
False
"""
from copy import deepcopy
return self.__class__(deepcopy(self.items(), memo), self.strict)
### Read-only methods ###
def copy(self):
"""
>>> OrderedDict(((1, 3), (3, 2), (2, 1))).copy()
OrderedDict([(1, 3), (3, 2), (2, 1)])
"""
return OrderedDict(self)
def items(self):
"""
``items`` returns a list of tuples representing all the
``(key, value)`` pairs in the dictionary.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.items()
[(1, 3), (3, 2), (2, 1)]
>>> d.clear()
>>> d.items()
[]
"""
return zip(self._sequence, self.values())
def keys(self):
"""
Return a list of keys in the ``OrderedDict``.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.keys()
[1, 3, 2]
"""
return self._sequence[:]
def values(self, values=None):
"""
Return a list of all the values in the OrderedDict.
Optionally you can pass in a list of values, which will replace the
current list. The value list must be the same len as the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.values()
[3, 2, 1]
"""
return [self[key] for key in self._sequence]
def iteritems(self):
"""
>>> ii = OrderedDict(((1, 3), (3, 2), (2, 1))).iteritems()
>>> ii.next()
(1, 3)
>>> ii.next()
(3, 2)
>>> ii.next()
(2, 1)
>>> ii.next()
Traceback (most recent call last):
StopIteration
"""
def make_iter(self=self):
keys = self.iterkeys()
while True:
key = keys.next()
yield (key, self[key])
return make_iter()
def iterkeys(self):
"""
>>> ii = OrderedDict(((1, 3), (3, 2), (2, 1))).iterkeys()
>>> ii.next()
1
>>> ii.next()
3
>>> ii.next()
2
>>> ii.next()
Traceback (most recent call last):
StopIteration
"""
return iter(self._sequence)
__iter__ = iterkeys
def itervalues(self):
"""
>>> iv = OrderedDict(((1, 3), (3, 2), (2, 1))).itervalues()
>>> iv.next()
3
>>> iv.next()
2
>>> iv.next()
1
>>> iv.next()
Traceback (most recent call last):
StopIteration
"""
def make_iter(self=self):
keys = self.iterkeys()
while True:
yield self[keys.next()]
return make_iter()
### Read-write methods ###
def clear(self):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.clear()
>>> d
OrderedDict([])
"""
dict.clear(self)
self._sequence = []
def pop(self, key, *args):
"""
No dict.pop in Python 2.2, gotta reimplement it
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.pop(3)
2
>>> d
OrderedDict([(1, 3), (2, 1)])
>>> d.pop(4)
Traceback (most recent call last):
KeyError: 4
>>> d.pop(4, 0)
0
>>> d.pop(4, 0, 1)
Traceback (most recent call last):
TypeError: pop expected at most 2 arguments, got 3
"""
if len(args) > 1:
raise TypeError, ('pop expected at most 2 arguments, got %s' %
(len(args) + 1))
if key in self:
val = self[key]
del self[key]
else:
try:
val = args[0]
except IndexError:
raise KeyError(key)
return val
def popitem(self, i=-1):
"""
Delete and return an item specified by index, not a random one as in
dict. The index is -1 by default (the last item).
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.popitem()
(2, 1)
>>> d
OrderedDict([(1, 3), (3, 2)])
>>> d.popitem(0)
(1, 3)
>>> OrderedDict().popitem()
Traceback (most recent call last):
KeyError: 'popitem(): dictionary is empty'
>>> d.popitem(2)
Traceback (most recent call last):
IndexError: popitem(): index 2 not valid
"""
if not self._sequence:
raise KeyError('popitem(): dictionary is empty')
try:
key = self._sequence[i]
except IndexError:
raise IndexError('popitem(): index %s not valid' % i)
return (key, self.pop(key))
def setdefault(self, key, defval = None):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.setdefault(1)
3
>>> d.setdefault(4) is None
True
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1), (4, None)])
>>> d.setdefault(5, 0)
0
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1), (4, None), (5, 0)])
"""
if key in self:
return self[key]
else:
self[key] = defval
return defval
def update(self, from_od):
"""
Update from another OrderedDict or sequence of (key, value) pairs
>>> d = OrderedDict(((1, 0), (0, 1)))
>>> d.update(OrderedDict(((1, 3), (3, 2), (2, 1))))
>>> d
OrderedDict([(1, 3), (0, 1), (3, 2), (2, 1)])
>>> d.update({4: 4})
Traceback (most recent call last):
TypeError: undefined order, cannot get items from dict
>>> d.update((4, 4))
Traceback (most recent call last):
TypeError: cannot convert dictionary update sequence element "4" to a 2-item sequence
"""
if isinstance(from_od, OrderedDict):
for key, val in from_od.items():
self[key] = val
elif isinstance(from_od, dict):
# we lose compatibility with other ordered dict types this way
raise TypeError('undefined order, cannot get items from dict')
else:
# FIXME: efficiency?
# sequence of 2-item sequences, or error
for item in from_od:
try:
key, val = item
except TypeError:
raise TypeError('cannot convert dictionary update'
' sequence element "%s" to a 2-item sequence' % item)
self[key] = val
def rename(self, old_key, new_key):
"""
Rename the key for a given value, without modifying sequence order.
For the case where new_key already exists this raise an exception,
since if new_key exists, it is ambiguous as to what happens to the
associated values, and the position of new_key in the sequence.
>>> od = OrderedDict()
>>> od['a'] = 1
>>> od['b'] = 2
>>> od.items()
[('a', 1), ('b', 2)]
>>> od.rename('b', 'c')
>>> od.items()
[('a', 1), ('c', 2)]
>>> od.rename('c', 'a')
Traceback (most recent call last):
ValueError: New key already exists: 'a'
>>> od.rename('d', 'b')
Traceback (most recent call last):
KeyError: 'd'
"""
if new_key == old_key:
# no-op
return
if new_key in self:
raise ValueError("New key already exists: %r" % new_key)
# rename sequence entry
value = self[old_key]
old_idx = self._sequence.index(old_key)
self._sequence[old_idx] = new_key
# rename internal dict entry
dict.__delitem__(self, old_key)
dict.__setitem__(self, new_key, value)
def setitems(self, items):
"""
This method allows you to set the items in the dict.
It takes a list of tuples - of the same sort returned by the ``items``
method.
>>> d = OrderedDict()
>>> d.setitems(((3, 1), (2, 3), (1, 2)))
>>> d
OrderedDict([(3, 1), (2, 3), (1, 2)])
"""
self.clear()
# FIXME: this allows you to pass in an OrderedDict as well :-)
self.update(items)
def setkeys(self, keys):
"""
``setkeys`` allows you to pass in a new list of keys which will
replace the current set. This must contain the same set of keys, but
need not be in the same order.
If you pass in new keys that don't match, a ``KeyError`` will be
raised.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.keys()
[1, 3, 2]
>>> d.setkeys((1, 2, 3))
>>> d
OrderedDict([(1, 3), (2, 1), (3, 2)])
>>> d.setkeys(['a', 'b', 'c'])
Traceback (most recent call last):
KeyError: 'Keylist is not the same as current keylist.'
"""
# FIXME: Efficiency? (use set for Python 2.4 :-)
# NOTE: list(keys) rather than keys[:] because keys[:] returns
# a tuple, if keys is a tuple.
kcopy = list(keys)
kcopy.sort()
self._sequence.sort()
if kcopy != self._sequence:
raise KeyError('Keylist is not the same as current keylist.')
# NOTE: This makes the _sequence attribute a new object, instead
# of changing it in place.
# FIXME: efficiency?
self._sequence = list(keys)
def setvalues(self, values):
"""
You can pass in a list of values, which will replace the
current list. The value list must be the same len as the OrderedDict.
(Or a ``ValueError`` is raised.)
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.setvalues((1, 2, 3))
>>> d
OrderedDict([(1, 1), (3, 2), (2, 3)])
>>> d.setvalues([6])
Traceback (most recent call last):
ValueError: Value list is not the same length as the OrderedDict.
"""
if len(values) != len(self):
# FIXME: correct error to raise?
raise ValueError('Value list is not the same length as the '
'OrderedDict.')
self.update(zip(self, values))
### Sequence Methods ###
def index(self, key):
"""
Return the position of the specified key in the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.index(3)
1
>>> d.index(4)
Traceback (most recent call last):
...
ValueError: 4 is not in list
"""
return self._sequence.index(key)
def insert(self, index, key, value):
"""
Takes ``index``, ``key``, and ``value`` as arguments.
Sets ``key`` to ``value``, so that ``key`` is at position ``index`` in
the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.insert(0, 4, 0)
>>> d
OrderedDict([(4, 0), (1, 3), (3, 2), (2, 1)])
>>> d.insert(0, 2, 1)
>>> d
OrderedDict([(2, 1), (4, 0), (1, 3), (3, 2)])
>>> d.insert(8, 8, 1)
>>> d
OrderedDict([(2, 1), (4, 0), (1, 3), (3, 2), (8, 1)])
"""
if key in self:
# FIXME: efficiency?
del self[key]
self._sequence.insert(index, key)
dict.__setitem__(self, key, value)
def reverse(self):
"""
Reverse the order of the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.reverse()
>>> d
OrderedDict([(2, 1), (3, 2), (1, 3)])
"""
self._sequence.reverse()
def sort(self, *args, **kwargs):
"""
Sort the key order in the OrderedDict.
This method takes the same arguments as the ``list.sort`` method on
your version of Python.
>>> d = OrderedDict(((4, 1), (2, 2), (3, 3), (1, 4)))
>>> d.sort()
>>> d
OrderedDict([(1, 4), (2, 2), (3, 3), (4, 1)])
"""
self._sequence.sort(*args, **kwargs)
class Keys(object):
# FIXME: should this object be a subclass of list?
"""
Custom object for accessing the keys of an OrderedDict.
Can be called like the normal ``OrderedDict.keys`` method, but also
supports indexing and sequence methods.
"""
def __init__(self, main):
self._main = main
def __call__(self):
"""Pretend to be the keys method."""
return self._main._keys()
def __getitem__(self, index):
"""Fetch the key at position i."""
# NOTE: this automatically supports slicing :-)
return self._main._sequence[index]
def __setitem__(self, index, name):
"""
You cannot assign to keys, but you can do slice assignment to re-order
them.
You can only do slice assignment if the new set of keys is a reordering
of the original set.
"""
if isinstance(index, types.SliceType):
# FIXME: efficiency?
# check length is the same
indexes = range(len(self._main._sequence))[index]
if len(indexes) != len(name):
raise ValueError('attempt to assign sequence of size %s '
'to slice of size %s' % (len(name), len(indexes)))
# check they are the same keys
# FIXME: Use set
old_keys = self._main._sequence[index]
new_keys = list(name)
old_keys.sort()
new_keys.sort()
if old_keys != new_keys:
raise KeyError('Keylist is not the same as current keylist.')
orig_vals = [self._main[k] for k in name]
del self._main[index]
vals = zip(indexes, name, orig_vals)
vals.sort()
for i, k, v in vals:
if self._main.strict and k in self._main:
raise ValueError('slice assignment must be from '
'unique keys')
self._main.insert(i, k, v)
else:
raise ValueError('Cannot assign to keys')
### following methods pinched from UserList and adapted ###
def __repr__(self): return repr(self._main._sequence)
# FIXME: do we need to check if we are comparing with another ``Keys``
# object? (like the __cast method of UserList)
def __lt__(self, other): return self._main._sequence < other
def __le__(self, other): return self._main._sequence <= other
def __eq__(self, other): return self._main._sequence == other
def __ne__(self, other): return self._main._sequence != other
def __gt__(self, other): return self._main._sequence > other
def __ge__(self, other): return self._main._sequence >= other
# FIXME: do we need __cmp__ as well as rich comparisons?
def __cmp__(self, other): return cmp(self._main._sequence, other)
def __contains__(self, item): return item in self._main._sequence
def __len__(self): return len(self._main._sequence)
def __iter__(self): return self._main.iterkeys()
def count(self, item): return self._main._sequence.count(item)
def index(self, item, *args): return self._main._sequence.index(item, *args)
def reverse(self): self._main._sequence.reverse()
def sort(self, *args, **kwds): self._main._sequence.sort(*args, **kwds)
def __mul__(self, n): return self._main._sequence*n
__rmul__ = __mul__
def __add__(self, other): return self._main._sequence + other
def __radd__(self, other): return other + self._main._sequence
## following methods not implemented for keys ##
def __delitem__(self, i): raise TypeError('Can\'t delete items from keys')
def __iadd__(self, other): raise TypeError('Can\'t add in place to keys')
def __imul__(self, n): raise TypeError('Can\'t multiply keys in place')
def append(self, item): raise TypeError('Can\'t append items to keys')
def insert(self, i, item): raise TypeError('Can\'t insert items into keys')
def pop(self, i=-1): raise TypeError('Can\'t pop items from keys')
def remove(self, item): raise TypeError('Can\'t remove items from keys')
def extend(self, other): raise TypeError('Can\'t extend keys')
class Items(object):
"""
Custom object for accessing the items of an OrderedDict.
Can be called like the normal ``OrderedDict.items`` method, but also
supports indexing and sequence methods.
"""
def __init__(self, main):
self._main = main
def __call__(self):
"""Pretend to be the items method."""
return self._main._items()
def __getitem__(self, index):
"""Fetch the item at position i."""
if isinstance(index, types.SliceType):
# fetching a slice returns an OrderedDict
return self._main[index].items()
key = self._main._sequence[index]
return (key, self._main[key])
def __setitem__(self, index, item):
"""Set item at position i to item."""
if isinstance(index, types.SliceType):
# NOTE: item must be an iterable (list of tuples)
self._main[index] = OrderedDict(item)
else:
# FIXME: Does this raise a sensible error?
orig = self._main.keys[index]
key, value = item
if self._main.strict and key in self and (key != orig):
raise ValueError('slice assignment must be from '
'unique keys')
# delete the current one
del self._main[self._main._sequence[index]]
self._main.insert(index, key, value)
def __delitem__(self, i):
"""Delete the item at position i."""
key = self._main._sequence[i]
if isinstance(i, types.SliceType):
for k in key:
# FIXME: efficiency?
del self._main[k]
else:
del self._main[key]
### following methods pinched from UserList and adapted ###
def __repr__(self): return repr(self._main.items())
# FIXME: do we need to check if we are comparing with another ``Items``
# object? (like the __cast method of UserList)
def __lt__(self, other): return self._main.items() < other
def __le__(self, other): return self._main.items() <= other
def __eq__(self, other): return self._main.items() == other
def __ne__(self, other): return self._main.items() != other
def __gt__(self, other): return self._main.items() > other
def __ge__(self, other): return self._main.items() >= other
def __cmp__(self, other): return cmp(self._main.items(), other)
def __contains__(self, item): return item in self._main.items()
def __len__(self): return len(self._main._sequence) # easier :-)
def __iter__(self): return self._main.iteritems()
def count(self, item): return self._main.items().count(item)
def index(self, item, *args): return self._main.items().index(item, *args)
def reverse(self): self._main.reverse()
def sort(self, *args, **kwds): self._main.sort(*args, **kwds)
def __mul__(self, n): return self._main.items()*n
__rmul__ = __mul__
def __add__(self, other): return self._main.items() + other
def __radd__(self, other): return other + self._main.items()
def append(self, item):
"""Add an item to the end."""
# FIXME: this is only append if the key isn't already present
key, value = item
self._main[key] = value
def insert(self, i, item):
key, value = item
self._main.insert(i, key, value)
def pop(self, i=-1):
key = self._main._sequence[i]
return (key, self._main.pop(key))
def remove(self, item):
key, value = item
try:
assert value == self._main[key]
except (KeyError, AssertionError):
raise ValueError('ValueError: list.remove(x): x not in list')
else:
del self._main[key]
def extend(self, other):
# FIXME: is only a true extend if none of the keys already present
for item in other:
key, value = item
self._main[key] = value
def __iadd__(self, other):
self.extend(other)
## following methods not implemented for items ##
def __imul__(self, n): raise TypeError('Can\'t multiply items in place')
class Values(object):
"""
Custom object for accessing the values of an OrderedDict.
Can be called like the normal ``OrderedDict.values`` method, but also
supports indexing and sequence methods.
"""
def __init__(self, main):
self._main = main
def __call__(self):
"""Pretend to be the values method."""
return self._main._values()
def __getitem__(self, index):
"""Fetch the value at position i."""
if isinstance(index, types.SliceType):
return [self._main[key] for key in self._main._sequence[index]]
else:
return self._main[self._main._sequence[index]]
def __setitem__(self, index, value):
"""
Set the value at position i to value.
You can only do slice assignment to values if you supply a sequence of
equal length to the slice you are replacing.
"""
if isinstance(index, types.SliceType):
keys = self._main._sequence[index]
if len(keys) != len(value):
raise ValueError('attempt to assign sequence of size %s '
'to slice of size %s' % (len(value), len(keys)))
# FIXME: efficiency? Would be better to calculate the indexes
# directly from the slice object
# NOTE: the new keys can collide with existing keys (or even
# contain duplicates) - these will overwrite
for key, val in zip(keys, value):
self._main[key] = val
else:
self._main[self._main._sequence[index]] = value
### following methods pinched from UserList and adapted ###
def __repr__(self): return repr(self._main.values())
# FIXME: do we need to check if we are comparing with another ``Values``
# object? (like the __cast method of UserList)
def __lt__(self, other): return self._main.values() < other
def __le__(self, other): return self._main.values() <= other
def __eq__(self, other): return self._main.values() == other
def __ne__(self, other): return self._main.values() != other
def __gt__(self, other): return self._main.values() > other
def __ge__(self, other): return self._main.values() >= other
def __cmp__(self, other): return cmp(self._main.values(), other)
def __contains__(self, item): return item in self._main.values()
def __len__(self): return len(self._main._sequence) # easier :-)
def __iter__(self): return self._main.itervalues()
def count(self, item): return self._main.values().count(item)
def index(self, item, *args): return self._main.values().index(item, *args)
def reverse(self):
"""Reverse the values"""
vals = self._main.values()
vals.reverse()
# FIXME: efficiency
self[:] = vals
def sort(self, *args, **kwds):
"""Sort the values."""
vals = self._main.values()
vals.sort(*args, **kwds)
self[:] = vals
def __mul__(self, n): return self._main.values()*n
__rmul__ = __mul__
def __add__(self, other): return self._main.values() + other
def __radd__(self, other): return other + self._main.values()
## following methods not implemented for values ##
def __delitem__(self, i): raise TypeError('Can\'t delete items from values')
def __iadd__(self, other): raise TypeError('Can\'t add in place to values')
def __imul__(self, n): raise TypeError('Can\'t multiply values in place')
def append(self, item): raise TypeError('Can\'t append items to values')
def insert(self, i, item): raise TypeError('Can\'t insert items into values')
def pop(self, i=-1): raise TypeError('Can\'t pop items from values')
def remove(self, item): raise TypeError('Can\'t remove items from values')
def extend(self, other): raise TypeError('Can\'t extend values')
class SequenceOrderedDict(OrderedDict):
"""
Experimental version of OrderedDict that has a custom object for ``keys``,
``values``, and ``items``.
These are callable sequence objects that work as methods, or can be
manipulated directly as sequences.
Test for ``keys``, ``items`` and ``values``.
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.keys
[1, 2, 3]
>>> d.keys()
[1, 2, 3]
>>> d.setkeys((3, 2, 1))
>>> d
SequenceOrderedDict([(3, 4), (2, 3), (1, 2)])
>>> d.setkeys((1, 2, 3))
>>> d.keys[0]
1
>>> d.keys[:]
[1, 2, 3]
>>> d.keys[-1]
3
>>> d.keys[-2]
2
>>> d.keys[0:2] = [2, 1]
>>> d
SequenceOrderedDict([(2, 3), (1, 2), (3, 4)])
>>> d.keys.reverse()
>>> d.keys
[3, 1, 2]
>>> d.keys = [1, 2, 3]
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.keys = [3, 1, 2]
>>> d
SequenceOrderedDict([(3, 4), (1, 2), (2, 3)])
>>> a = SequenceOrderedDict()
>>> b = SequenceOrderedDict()
>>> a.keys == b.keys
1
>>> a['a'] = 3
>>> a.keys == b.keys
0
>>> b['a'] = 3
>>> a.keys == b.keys
1
>>> b['b'] = 3
>>> a.keys == b.keys
0
>>> a.keys > b.keys
0
>>> a.keys < b.keys
1
>>> 'a' in a.keys
1
>>> len(b.keys)
2
>>> 'c' in d.keys
0
>>> 1 in d.keys
1
>>> [v for v in d.keys]
[3, 1, 2]
>>> d.keys.sort()
>>> d.keys
[1, 2, 3]
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)), strict=True)
>>> d.keys[::-1] = [1, 2, 3]
>>> d
SequenceOrderedDict([(3, 4), (2, 3), (1, 2)])
>>> d.keys[:2]
[3, 2]
>>> d.keys[:2] = [1, 3]
Traceback (most recent call last):
KeyError: 'Keylist is not the same as current keylist.'
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.values
[2, 3, 4]
>>> d.values()
[2, 3, 4]
>>> d.setvalues((4, 3, 2))
>>> d
SequenceOrderedDict([(1, 4), (2, 3), (3, 2)])
>>> d.values[::-1]
[2, 3, 4]
>>> d.values[0]
4
>>> d.values[-2]
3
>>> del d.values[0]
Traceback (most recent call last):
TypeError: Can't delete items from values
>>> d.values[::2] = [2, 4]
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> 7 in d.values
0
>>> len(d.values)
3
>>> [val for val in d.values]
[2, 3, 4]
>>> d.values[-1] = 2
>>> d.values.count(2)
2
>>> d.values.index(2)
0
>>> d.values[-1] = 7
>>> d.values
[2, 3, 7]
>>> d.values.reverse()
>>> d.values
[7, 3, 2]
>>> d.values.sort()
>>> d.values
[2, 3, 7]
>>> d.values.append('anything')
Traceback (most recent call last):
TypeError: Can't append items to values
>>> d.values = (1, 2, 3)
>>> d
SequenceOrderedDict([(1, 1), (2, 2), (3, 3)])
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.items()
[(1, 2), (2, 3), (3, 4)]
>>> d.setitems([(3, 4), (2 ,3), (1, 2)])
>>> d
SequenceOrderedDict([(3, 4), (2, 3), (1, 2)])
>>> d.items[0]
(3, 4)
>>> d.items[:-1]
[(3, 4), (2, 3)]
>>> d.items[1] = (6, 3)
>>> d.items
[(3, 4), (6, 3), (1, 2)]
>>> d.items[1:2] = [(9, 9)]
>>> d
SequenceOrderedDict([(3, 4), (9, 9), (1, 2)])
>>> del d.items[1:2]
>>> d
SequenceOrderedDict([(3, 4), (1, 2)])
>>> (3, 4) in d.items
1
>>> (4, 3) in d.items
0
>>> len(d.items)
2
>>> [v for v in d.items]
[(3, 4), (1, 2)]
>>> d.items.count((3, 4))
1
>>> d.items.index((1, 2))
1
>>> d.items.index((2, 1))
Traceback (most recent call last):
...
ValueError: (2, 1) is not in list
>>> d.items.reverse()
>>> d.items
[(1, 2), (3, 4)]
>>> d.items.reverse()
>>> d.items.sort()
>>> d.items
[(1, 2), (3, 4)]
>>> d.items.append((5, 6))
>>> d.items
[(1, 2), (3, 4), (5, 6)]
>>> d.items.insert(0, (0, 0))
>>> d.items
[(0, 0), (1, 2), (3, 4), (5, 6)]
>>> d.items.insert(-1, (7, 8))
>>> d.items
[(0, 0), (1, 2), (3, 4), (7, 8), (5, 6)]
>>> d.items.pop()
(5, 6)
>>> d.items
[(0, 0), (1, 2), (3, 4), (7, 8)]
>>> d.items.remove((1, 2))
>>> d.items
[(0, 0), (3, 4), (7, 8)]
>>> d.items.extend([(1, 2), (5, 6)])
>>> d.items
[(0, 0), (3, 4), (7, 8), (1, 2), (5, 6)]
"""
def __init__(self, init_val=(), strict=True):
OrderedDict.__init__(self, init_val, strict=strict)
self._keys = self.keys
self._values = self.values
self._items = self.items
self.keys = Keys(self)
self.values = Values(self)
self.items = Items(self)
self._att_dict = {
'keys': self.setkeys,
'items': self.setitems,
'values': self.setvalues,
}
def __setattr__(self, name, value):
"""Protect keys, items, and values."""
if not '_att_dict' in self.__dict__:
object.__setattr__(self, name, value)
else:
try:
fun = self._att_dict[name]
except KeyError:
OrderedDict.__setattr__(self, name, value)
else:
fun(value)
| bsd-3-clause | 3,860,535,775,826,466,300 | 32.248918 | 93 | 0.493306 | false | 3.779774 | false | false | false |
Rosebotics/pymata-aio | examples/sparkfun_redbot/sparkfun_experiments/Exp5_Bumpers.py | 1 | 2908 | #!/usr/bin/python
"""
Exp5_Bumpers -- RedBot Experiment 5
Now let's experiment with the whisker bumpers. These super-simple switches
let you detect a collision before it really happens- the whisker will
bump something before your robot crashes into it.
This sketch was written by SparkFun Electronics, with lots of help from
the Arduino community.
This code is completely free for any use.
Visit https://learn.sparkfun.com/tutorials/redbot-inventors-kit-guide
for SIK information.
8 Oct 2013 M. Hord
Revised 30 Oct 2014 B. Huang
Revised 2 Oct 2015 L. Mathews
"""
import sys
import signal
from pymata_aio.pymata3 import PyMata3
from library.redbot import RedBotMotors, RedBotBumper
WIFLY_IP_ADDRESS = None # Leave set as None if not using WiFly
WIFLY_IP_ADDRESS = "137.112.217.88" # If using a WiFly on the RedBot, set the ip address here.
if WIFLY_IP_ADDRESS:
board = PyMata3(ip_address=WIFLY_IP_ADDRESS)
else:
# Use a USB cable to RedBot or an XBee connection instead of WiFly.
COM_PORT = None # Use None for automatic com port detection, or set if needed i.e. "COM7"
board = PyMata3(com_port=COM_PORT)
# Instantiate the motor control object. This only needs to be done once.
motors = RedBotMotors(board)
left_bumper = RedBotBumper(board, 3) # initializes bumper object on pin 3
right_bumper = RedBotBumper(board, 11) # initializes bumper object on pin 11
BUTTON_PIN = 12
def signal_handler(sig, frame):
"""Helper method to shutdown the RedBot if Ctrl-c is pressed"""
print('\nYou pressed Ctrl+C')
if board is not None:
board.send_reset()
board.shutdown()
sys.exit(0)
def setup():
signal.signal(signal.SIGINT, signal_handler)
print("Experiment 5 - Bump sensors")
def loop():
motors.drive(255)
board.sleep(0.1) # When using a wireless connection a small sleep is necessary
left_bumper_state = left_bumper.read()
right_bumper_state = right_bumper.read()
if left_bumper_state == 0: # left bumper is bumped
print("Left bump")
reverse()
turn_right()
    if right_bumper_state == 0:  # right bumper is bumped
print("Right bump")
reverse()
turn_left()
def reverse():
"""backs up at full power"""
motors.drive(-255)
board.sleep(0.5)
motors.brake()
board.sleep(0.1)
def turn_right():
"""turns RedBot to the Right"""
motors.left_motor(-150) # spin CCW
motors.right_motor(-150) # spin CCW
board.sleep(0.5)
    motors.brake()
board.sleep(0.1) # short delay to let robot fully stop
def turn_left():
"""turns RedBot to the Left"""
    motors.left_motor(150)  # spin CW
    motors.right_motor(150)  # spin CW
board.sleep(0.5)
    motors.brake()
board.sleep(0.1) # short delay to let robot fully stop
if __name__ == "__main__":
setup()
while True:
loop()
| gpl-3.0 | 8,720,228,147,168,475,000 | 25.436364 | 95 | 0.671939 | false | 3.16087 | false | false | false |
pgr-me/metis_projects | 04-marijuana/library/utilities.py | 1 | 3623 | from nytimesarticle import articleAPI
import collections
import math
import datetime
import re
import pickle
# api keys
prasmuss = '7b4597b0dc6845688a8f90c00f3e60b6'
peter_gray_rasmussen = '67391c8a5c6c2d8926eb3d9c5d136c59:7:72273330'
proton = 'f8c34c7cda7848f997a9c273815d28a9'
api = articleAPI(proton)
def convert(data):
'''
this function encodes dictionary of unicode entries into utf8
from http://stackoverflow.com/questions/1254454/fastest-way-to-convert-a-dicts-keys-values-from-unicode-to-str
'''
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(convert, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convert, data))
else:
return data
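# Illustrative use (hypothetical API payload):
#   convert({u'docs': [{u'pub_date': u'2016-01-01'}]})
#   -> {'docs': [{'pub_date': '2016-01-01'}]}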
def get_nyt_article_stats(articles_and_meta):
'''
returns the number of hits, number of hits in 100 pages, and hits per page
'''
num_hits = articles_and_meta['response']['meta']['hits'] # total number of articles for query
hits_per_query_set = articles_and_meta['response']['meta']['offset'] # each query gets up to 100 pages
hits_per_page = len(articles_and_meta['response']['docs']) # hits per page
pages = hits_per_query_set / hits_per_page
queries = int(math.ceil(num_hits / float(hits_per_page)))
return num_hits, hits_per_query_set, pages, hits_per_page, queries
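# Illustration of get_nyt_article_stats (made-up meta values): hits = 250,
# offset = 100 and 10 docs on the page gives pages = 100 / 10 = 10 and
# queries = int(math.ceil(250 / 10.0)) = 25.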
def get_last_date_plus_one(articles_and_meta):
"""
returns last (not necessarily most recent) date
"""
date_li = articles_and_meta['response']['docs'][-1]['pub_date'].split('T')[0].split('-')
date_str = ''.join(date_li)
date_date = datetime.datetime.strptime(date_str, '%Y%m%d').date()
date_date_plus_one = str(date_date + datetime.timedelta(days=1))
output = re.sub('-', '', date_date_plus_one)
return output
def extract_to_mongod(query, date_begin, date_end, mdb):
"""
pings nyt api and writes to mongodb
"""
data_converted = None
while data_converted is None:
try:
data = api.search(q=query, begin_date=date_begin, end_date='20160430', sort='oldest')
data_converted = convert(data) # convert unicode to strings
except:
pass
date_date = datetime.datetime.strptime(date_begin, '%Y%m%d').date()
date_date_plus_one = str(date_date + datetime.timedelta(days=1))
date_begin = re.sub('-', '', date_date_plus_one)
stats = get_nyt_article_stats(data_converted) # outputs key stats from first ping
pings = stats[-1] # number of pings required
pings_list = range(0, pings - 1)
d_begin = date_begin
for ping in pings_list:
print d_begin
# get data from api
try:
data2 = api.search(q=query, begin_date=d_begin, end_date='20160430', sort='oldest')
data_converted2 = convert(data2) # convert unicode to strings
last_date_plus_one = get_last_date_plus_one(data_converted2)
mdb.insert_one(data_converted2) # insert one set of articles into db
d_begin = last_date_plus_one # update date
except:
date_date = datetime.datetime.strptime(d_begin, '%Y%m%d').date()
date_date_plus_one = str(date_date + datetime.timedelta(days=1))
d_begin = re.sub('-', '', date_date_plus_one)
return 'success'
def pickle_mongo(collection_name, filename):
cur = collection_name.find()
l = []
for doc in cur:
l.append(doc)
file_name = filename + '.pickle'
    with open(file_name, 'wb') as f:
pickle.dump(l, f) | gpl-3.0 | 5,287,285,362,401,634,000 | 35.979592 | 114 | 0.645598 | false | 3.317766 | false | false | false |
gmelenka/BraidedCompositeDesign | main.py | 1 | 49822 | from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import ObjectProperty
from kivy.uix.spinner import Spinner
from kivy.uix.listview import ListItemButton
from kivy.uix.widget import Widget
from kivy.graphics import Color, Line, Rectangle
from kivy.metrics import dp
from kivy.vector import Vector
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.textinput import TextInput
from thumbchooser import FileChooserThumbView
from kivy.core.window import Window
from os import listdir
from os.path import dirname, join
import os
import glob
import math
import shutil
#version required for Buildozer
__version__ = "1.0"
__author__ = "Garrett Melenka, Marcus Ivey"
__copyright__ = "Copyright 2015, The Multipurpose Composites Group- University of Alberta"
__credits__ = ["Garrett Melenka", "Marcus Ivey", "Jason Carey"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Garrett Melenka"
__email__ = "[email protected]"
__status__ = "Production"
class MainScreen(FloatLayout):
pass
#Load dialog for angle measure popup window
class LoadDialog(FloatLayout):
load = ObjectProperty(None)
cancel = ObjectProperty(None)
#make sure image file have been selected
def fileSelect(self, path, name):
print name
if name:
self.load(path, name)
#Angle Measurement layout
class AngleLayout(BoxLayout):
def dismiss_popup(self):
self._popup.dismiss()
def show_load(self):
content = LoadDialog(load=self.load, cancel=self.dismiss_popup)
self._popup = Popup(title="Select Image", content=content, size_hint=(0.99, 0.99))
self._popup.open()
def load(self, path, filename):
filename = filename[0]
image = self.ids['image']
image.source = filename
self.dismiss_popup()
def initiate_angle_finder(self):
button = self.ids['toggle']
image = self.ids['scatter']
anchor = self.ids['anchor']
if button.state == 'down':
image.do_scale = False
image.do_rotation = False
image.do_translation_x = False
image.do_translation_y = False
button.text = 'Clear'
angleFinder = AngleFinder()
anchor.add_widget(angleFinder)
if button.state == 'normal':
button.text = 'Draw'
image.do_scale = True
image.do_rotation = True
image.do_translation_x = True
image.do_translation_y = True
anchor.clear_widgets()
def reset_image(self):
button = self.ids['toggle']
anchor = self.ids['anchor']
scatter = self.ids['scatter']
image = self.ids['image']
anchor2 = self.ids['anchor2']
scatter.scale = 1
scatter.rotation = 0
scatter.pos = (anchor2.center_x - image.center_x, anchor2.center_y - image.center_y)
button.state = 'normal'
button.text = 'Draw'
scatter.do_scale = True
scatter.do_rotation = True
scatter.do_translation_x = True
scatter.do_translation_y = True
anchor.clear_widgets()
#This is the angle finder widget that allows drawing two straight lines and calculates the minor angle between the two.
class AngleFinder(Widget):
#This function initializes the widget with a touch count of zero
def __init__(self, **kwargs):
super(AngleFinder, self).__init__(**kwargs)
self.touch_down_count = 0
#This function defines the actions that take place when a touch event occurs
def on_touch_down(self, touch):
#Record the touch coordinates in x and y as variables
x1 = touch.x
y1 = touch.y
#when the touch count is 0 or 1, we will record the touch coordinates and draw a crosshair at the touch location
if self.touch_down_count > 1:
return
with self.canvas:
touch.ud['label'] = TextInput()
self.initiate_touch_label(touch.ud['label'], touch)
self.add_widget(touch.ud['label'])
#save the touch points to the user dictionary
touch.ud['x1'] = x1
touch.ud['y1'] = y1
#set parameters for crosshair display
Color(1, 0, 0)
l = dp(25)
w = dp(1)
#draw crosshair
Rectangle(pos=(touch.ud['x1'] - w / 2, touch.ud['y1'] - l / 2), size=(w, l))
Rectangle(pos=(touch.ud['x1'] - l / 2, touch.ud['y1'] - w / 2), size=(l, w))
#Initialize the vector v1
if self.touch_down_count == 0:
#Record the touch coordinates to variables
x2 = touch.x
y2 = touch.y
#Save touch coordinates to the user dictionary
touch.ud['x2'] = x2
touch.ud['y2'] = y2
#When the touch count is zero (first touch), we define a vector v1 based on the touch positions in ud
v1 = (touch.ud['x2'] - touch.ud['x1'], touch.ud['y2'] - touch.ud['y1'])
self.v1 = v1
#Function to define what happens on a drag action
def on_touch_move(self, touch):
#Record the touch coordinates to variables
x2 = touch.x
y2 = touch.y
#Save touch coordinates to the user dictionary
touch.ud['x2'] = x2
touch.ud['y2'] = y2
ud = touch.ud
#define a group, g, that will be assigned to the line drawn to allow the line to be redrawn as movements occur, leaving only one line on the screen
ud['group'] = g = str(touch.uid)
self.canvas.remove_group(g)
#When the touch count is zero (first touch), we define a vector v1 based on the touch positions in ud
if self.touch_down_count == 0:
v1 = (touch.ud['x2'] - touch.ud['x1'], touch.ud['y2'] - touch.ud['y1'])
self.v1 = v1
#When the touch count is 1 (second touch), we define a vector v2 based on the touch positions in ud. The angle between vectors v1 and v2 is then calculated.
if self.touch_down_count == 1:
v2 = (touch.ud['x2'] - touch.ud['x1'], touch.ud['y2'] - touch.ud['y1'])
self.v2 = v2
angle = Vector(self.v1).angle(self.v2)
absoluteAngle = abs(angle)
#The following if statement is used to ensure the minor angle is always calculated
if absoluteAngle > 90:
absoluteAngle = 180 - absoluteAngle
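            # e.g. with hypothetical touches giving v1 = (1, 0) and
            # v2 = (-1, 1), Vector(v1).angle(v2) is 135, reported as 45.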
#The next two lines are used to update the angle label value as the lines are moved around
touch.ud['angle'] = absoluteAngle
self.update_touch_label(touch.ud['label'], touch)
#If the touch count is greater than 1 (third touch), then this function will end and the canvas will clear as in the previous function
if self.touch_down_count > 1:
return
#This defines the line and crosshair that is drawn between the initial touch point and where the finger has been dragged
with self.canvas:
Color(1, 0, 0)
l = dp(25)
w = dp(1)
Line(points=[touch.ud['x1'], touch.ud['y1'], x2, y2], width=w, group=g)
Rectangle(pos=(touch.ud['x2'] - w / 2, touch.ud['y2'] - l / 2), size=(w, l), group=g)
Rectangle(pos=(touch.ud['x2'] - l / 2, touch.ud['y2'] - w / 2), size=(l, w), group=g)
#this function defines what to do when a touch is released. The touch count is simply incremented
def on_touch_up(self, touch):
self.touch_down_count += 1
#This function defines how the angle label is to be updated. It indicates the number of digits to show, the label size and position, color, and font type
def update_touch_label(self, label, touch):
degree = unichr(176)
label.text = '%.1f%s' % ((touch.ud['angle']), degree)
label.pos = (self.center_x - dp(40), self.height + dp(70))
label.font_size = dp(24)
label.size = dp(75), dp(40)
label.padding_x = [dp(10), dp(10)]
label.padding_y = [dp(5), dp(5)]
label.readonly = True
label.multiline = False
def initiate_touch_label(self, label, touch):
degree = unichr(176)
label.text = '%s%s' % ('---', degree)
label.pos = (self.center_x - dp(40), self.height + dp(70))
label.font_size = dp(24)
label.size = dp(75), dp(40)
label.padding_x = [dp(10), dp(10)]
label.padding_y = [dp(5), dp(5)]
label.readonly = True
label.multiline = False
#Layout for static about screen
class About_Screen(FloatLayout):
pass
#About popup for Micromechanics window
class MicroMechanicsAbout(FloatLayout):
load = ObjectProperty(None)
cancel = ObjectProperty(None)
#Calculation of micromechanics properties for a unidirectional lamina
class MicroMechanics(FloatLayout):
def dismiss_popup(self):
self._popup.dismiss()
def AboutMicromechanics(self):
content = MicroMechanicsAbout(load=self.load, cancel=self.dismiss_popup)
self._popup = Popup(title="Micro-mechanics About", content=content, size_hint=(0.9, 0.9))
self._popup.open()
def load(self):
pass
def fiberSelect(self):
fiberType = self.ids.yarnSelectSpinner.text
#E-glass properties
if fiberType == "E-Glass":
self.ids.longitudinalModulus.text = str(73.0)
self.ids.transverseModulus.text = str(73.0)
self.ids.shearModulus.text = str(30.0)
self.ids.majorPoissonRatio.text = str(0.23)
#S-glass properties
if fiberType == "S-Glass":
self.ids.longitudinalModulus.text = str(86.0)
self.ids.transverseModulus.text = str(86.0)
self.ids.shearModulus.text = str(35)
self.ids.majorPoissonRatio.text = str(0.23)
#AS4 Carbon fiber properties
if fiberType == "AS4-Carbon":
self.ids.longitudinalModulus.text = str(235.0)
self.ids.transverseModulus.text = str(15)
self.ids.shearModulus.text = str(27)
self.ids.majorPoissonRatio.text = str(0.20)
#T300 Carbon fiber properties
if fiberType == "T300 Carbon":
self.ids.longitudinalModulus.text = str(230.0)
self.ids.transverseModulus.text = str(15)
self.ids.shearModulus.text = str(27)
self.ids.majorPoissonRatio.text = str(0.20)
#Boron fiber properties
if fiberType == "Boron":
self.ids.longitudinalModulus.text = str(395.0)
self.ids.transverseModulus.text = str(395)
self.ids.shearModulus.text = str(165)
self.ids.majorPoissonRatio.text = str(0.13)
#Kevlar 49 fiber properties
if fiberType == "Kevlar 49":
self.ids.longitudinalModulus.text = str(131.0)
self.ids.transverseModulus.text = str(7)
self.ids.shearModulus.text = str(21)
self.ids.majorPoissonRatio.text = str(0.33)
#
if fiberType == "Custom":
self.ids.longitudinalModulus.text = str(100.0)
self.ids.transverseModulus.text = str(10)
self.ids.shearModulus.text = str(20)
self.ids.majorPoissonRatio.text = str(0.20)
def matrixSelect(self):
matrixType = self.ids.matrixSelectSpinner.text
#Epoxy mechanical properties
if matrixType == "Epoxy":
self.ids.matrixModulus.text = str(4.3)
self.ids.matrixShearModulus.text = str(1.6)
self.ids.matrixPoissonRatio.text = str(0.35)
#polyester mechanical properties
if matrixType == "Polyester":
self.ids.matrixModulus.text = str(3.2)
self.ids.matrixShearModulus.text = str(0.7)
self.ids.matrixPoissonRatio.text = str(0.35)
#Polyimides mechanical properties
if matrixType == "Polyimides":
self.ids.matrixModulus.text = str(1.4)
self.ids.matrixShearModulus.text = str(3.1)
self.ids.matrixPoissonRatio.text = str(0.35)
#PEEK mechanical properties
if matrixType == "PEEK":
self.ids.matrixModulus.text = str(1.32)
self.ids.matrixShearModulus.text = str(3.7)
self.ids.matrixPoissonRatio.text = str(0.35)
def CalculateVF(self, data):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn' and matrixVal != 'Select Matrix':
Ef1 = float(self.ids.longitudinalModulus.text)
Ef2 = float(self.ids.transverseModulus.text)
Gf12 = float(self.ids.shearModulus.text)
nuf12 = float(self.ids.majorPoissonRatio.text)
VF = float(self.ids.volumeFraction.text)
Em = float(self.ids.matrixModulus.text)
Gm = float(self.ids.matrixShearModulus.text)
num = float(self.ids.matrixPoissonRatio.text)
if VF>=0 and VF <1:
#Calculate Longitudinal Elastic Modulus
E1 = Ef1 * VF + Em * (1-VF)
#Calculate Transverse Elastic Modulus
E2 = Ef2*Em / (Ef2*(1-VF) + Em*VF)
#Calculate Major Poisson's Ratio
nu12 = nuf12*VF + num*(1-VF)
#Calculate Shear Modulus
G12 = Gf12*Gm / (Gm * VF+ Gf12 * (1 - VF))
#Write values to screen
self.ids.modulusE1.text = '{0:.3f}'.format(E1)
self.ids.modulusE2.text = '{0:.3f}'.format(E2)
self.ids.modulusG12.text = '{0:.3f}'.format(G12)
self.ids.poissonNu12.text = '{0:.3f}'.format(nu12)
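                # Worked check (E-Glass + Epoxy presets above, VF = 0.6):
                #   E1   = 73*0.6 + 4.3*0.4              ~ 45.52 GPa
                #   E2   = 73*4.3 / (73*0.4 + 4.3*0.6)   ~ 9.88 GPa
                #   G12  = 30*1.6 / (1.6*0.6 + 30*0.4)   ~ 3.70 GPa
                #   nu12 = 0.23*0.6 + 0.35*0.4           = 0.278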
def volumeFractionUp(self):
VF = float(self.ids.volumeFraction.text)
VF_new = VF + 0.1
if VF_new <= 1.0:
self.ids.volumeFraction.text = str(VF_new)
def volumeFractionDown(self):
VF = float(self.ids.volumeFraction.text)
VF_new = VF - 0.1
if VF_new > 0.0:
self.ids.volumeFraction.text = str(VF_new)
def EF1Up(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn':
EF1 = float(self.ids.longitudinalModulus.text)
EF1_new = EF1 + 1.0
#if EF1_new <= 1.0:
self.ids.longitudinalModulus.text = str(EF1_new)
def EF1Down(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn':
EF1 = float(self.ids.longitudinalModulus.text)
EF1_new = EF1 - 1.0
if EF1_new > 0.0:
self.ids.longitudinalModulus.text = str(EF1_new)
def EF2Up(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn':
EF2 = float(self.ids.transverseModulus.text)
EF2_new = EF2 + 1.0
#if EF1_new <= 1.0:
self.ids.transverseModulus.text = str(EF2_new)
def EF2Down(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn':
EF2 = float(self.ids.transverseModulus.text)
EF2_new = EF2 - 1.0
if EF2_new > 0.0:
self.ids.transverseModulus.text = str(EF2_new)
def GF12Up(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn':
GF12 = float(self.ids.shearModulus.text)
GF12_new = GF12 + 1.0
#if EF1_new <= 1.0:
self.ids.shearModulus.text = str(GF12_new)
def GF12Down(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn':
GF12 = float(self.ids.shearModulus.text)
GF12_new = GF12 - 1.0
if GF12_new > 0.0:
self.ids.shearModulus.text = str(GF12_new)
def nuf12Up(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn':
nuf12 = float(self.ids.majorPoissonRatio.text)
nuf12_new = nuf12 + 0.1
#if EF1_new <= 1.0:
self.ids.majorPoissonRatio.text = str(nuf12_new)
def nuf12Down(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if yarnVal != 'Select Yarn':
nuf12 = float(self.ids.majorPoissonRatio.text)
nuf12_new = nuf12 - 0.1
if nuf12_new > 0.0:
self.ids.majorPoissonRatio.text = str(nuf12_new)
def EmUp(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if matrixVal != 'Select Matrix':
Em = float(self.ids.matrixModulus.text)
Em_new = Em + 0.1
#if EF1_new <= 1.0:
self.ids.matrixModulus.text = str(Em_new)
def EmDown(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if matrixVal != 'Select Matrix':
Em = float(self.ids.matrixModulus.text)
Em_new = Em - 0.1
if Em_new > 0.0:
self.ids.matrixModulus.text = str(Em_new)
def GmUp(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if matrixVal != 'Select Matrix':
Gm = float(self.ids.matrixShearModulus.text)
Gm_new = Gm + 0.1
#if EF1_new <= 1.0:
self.ids.matrixShearModulus.text = str(Gm_new)
def GmDown(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if matrixVal != 'Select Matrix':
Gm = float(self.ids.matrixShearModulus.text)
Gm_new = Gm - 0.1
if Gm_new > 0.0:
self.ids.matrixShearModulus.text = str(Gm_new)
def NuMUp(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if matrixVal != 'Select Matrix':
Num = float(self.ids.matrixPoissonRatio.text)
Num_new = Num + 0.1
#if EF1_new <= 1.0:
self.ids.matrixPoissonRatio.text = str(Num_new)
def NuMDown(self):
matrixVal = self.ids.matrixSelectSpinner.text
yarnVal = self.ids.yarnSelectSpinner.text
if matrixVal != 'Select Matrix':
Num = float(self.ids.matrixPoissonRatio.text)
Num_new = Num - 0.1
if Num_new > 0.0:
self.ids.matrixPoissonRatio.text = str(Num_new)
#About popup for lamina strength window
class LaminaStrengthAbout(FloatLayout):
load = ObjectProperty(None)
cancel = ObjectProperty(None)
#Calculation of lamina strength properties of a unidirectional lamina
class LaminaStrength(BoxLayout):
def dismiss_popup(self):
self._popup.dismiss()
def AboutStrength(self):
content = LaminaStrengthAbout(load=self.load, cancel=self.dismiss_popup)
self._popup = Popup(title="Lamina Strength About", content=content, size_hint=(0.9, 0.9))
self._popup.open()
def load(self):
pass
def fiberSelect(self):
fiberType = self.ids.yarnSelectSpinner.text
#E-glass properties
if fiberType == "E-Glass":
self.ids.fiberStrength.text = str(3450.0)
self.ids.fiberModulus.text = str(73.0)
#S-glass properties
if fiberType == "S-Glass":
self.ids.fiberStrength.text = str(4500.0)
self.ids.fiberModulus.text = str(86.0)
#AS4 Carbon fiber properties
if fiberType == "AS4-Carbon":
self.ids.fiberStrength.text = str(3700.0)
self.ids.fiberModulus.text = str(235.0)
#T300 Carbon fiber properties
if fiberType == "T300 Carbon":
self.ids.fiberStrength.text = str(3100.0)
self.ids.fiberModulus.text = str(230.0)
#Boron fiber properties
if fiberType == "Boron":
self.ids.fiberStrength.text = str(3450.0)
self.ids.fiberModulus.text = str(395.0)
#Kevlar 49 fiber properties
if fiberType == "Kevlar 49":
self.ids.fiberStrength.text = str(3800.0)
self.ids.fiberModulus.text = str(131.0)
#
if fiberType == "Custom":
self.ids.fiberStrength.text = str(1000.0)
self.ids.fiberModulus.text = str(100.0)
def matrixSelect(self):
matrixType = self.ids.matrixSelectSpinner.text
#Epoxy mechanical properties
if matrixType == "Epoxy":
self.ids.matrixModulus.text = str(4.3)
self.ids.matrixShearModulus.text = str(1.6)
#self.ids.matrixPoissonRatio.text = str(0.35)
#polyester mechanical properties
if matrixType == "Polyester":
self.ids.matrixModulus.text = str(3.2)
self.ids.matrixShearModulus.text = str(0.7)
#self.ids.matrixPoissonRatio.text = str(0.35)
#Polyimides mechanical properties
if matrixType == "Polyimides":
self.ids.matrixModulus.text = str(1.4)
self.ids.matrixShearModulus.text = str(3.1)
#self.ids.matrixPoissonRatio.text = str(0.35)
#PEEK mechanical properties
if matrixType == "PEEK":
self.ids.matrixModulus.text = str(1.32)
self.ids.matrixShearModulus.text = str(3.7)
#self.ids.matrixPoissonRatio.text = str(0.35)
#Calculate the strength properties of a composite lamina
def CalculateLaminaStrength(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
        if yarnVal != 'Select Yarn' and matrixVal != 'Select Matrix':
Ef = float(self.ids.fiberModulus.text)
Em = float(self.ids.matrixModulus.text)
Gm = float(self.ids.matrixShearModulus.text)
Vf = float(self.ids.volumeFraction.text)
sigmaF = float(self.ids.fiberStrength.text)
if Ef > 0:
if Vf >=0 and Vf <1:
#Calculate ultimate tensile strength
sigmaFiber = sigmaF * Vf + sigmaF * (Em / Ef) * (1-Vf)
self.ids.tensileStrength.text = '{0:.1f}'.format(sigmaFiber)
#Calculate ultimate compressive strength
num = Vf * Em * Ef
den = 3 * (1 - Vf)
sqrt = math.sqrt(num / den)
sigmaC = 2 * Vf * sqrt
self.ids.compressiveStrength.text = '{0:.1f}'.format(sigmaC)
#Calculate ultimate compressive strength shear mode
sigmaCshear = (Gm / (1 - Vf)) * 1000
self.ids.compressiveStrengthShear.text = '{0:.1f}'.format(sigmaCshear)
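                # Quick check (E-Glass + Epoxy presets above, Vf = 0.6):
                # sigma_ult = 3450*0.6 + 3450*(4.3/73)*0.4 ~ 2151 MPa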
def volumeFractionUp(self):
VF = float(self.ids.volumeFraction.text)
VF_new = VF + 0.1
if VF_new <= 1.0:
self.ids.volumeFraction.text = str(VF_new)
def volumeFractionDown(self):
VF = float(self.ids.volumeFraction.text)
VF_new = VF - 0.1
if VF_new > 0.0:
self.ids.volumeFraction.text = str(VF_new)
def fiberModulusUp(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
if yarnVal != 'Select Yarn':
EF = float(self.ids.fiberModulus.text)
EF_new = EF + 1.0
#if EF_new <= 1.0:
self.ids.fiberModulus.text = str(EF_new)
def fiberModulusDown(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
if yarnVal != 'Select Yarn':
EF = float(self.ids.fiberModulus.text)
EF_new = EF - 1.0
if EF_new > 0.0:
self.ids.fiberModulus.text = str(EF_new)
def fiberStrengthUp(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
if yarnVal != 'Select Yarn':
SF = float(self.ids.fiberStrength.text)
SF_new = SF + 10.0
#if EF_new <= 1.0:
self.ids.fiberStrength.text = str(SF_new)
def fiberStrengthDown(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
if yarnVal != 'Select Yarn':
SF = float(self.ids.fiberStrength.text)
SF_new = SF - 10.0
if SF_new > 0.0:
self.ids.fiberStrength.text = str(SF_new)
def matrixModulusUp(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
if matrixVal != 'Select Matrix':
EM = float(self.ids.matrixModulus.text)
EM_new = EM + 1.0
#if EF_new <= 1.0:
self.ids.matrixModulus.text = str(EM_new)
def matrixModulusDown(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
if matrixVal != 'Select Matrix':
EM = float(self.ids.matrixModulus.text)
EM_new = EM - 1.0
if EM_new > 0.0:
self.ids.matrixModulus.text = str(EM_new)
def matrixShearModulusUp(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
if matrixVal != 'Select Matrix':
GM = float(self.ids.matrixShearModulus.text)
GM_new = GM + 1.0
#if EF_new <= 1.0:
self.ids.matrixShearModulus.text = str(GM_new)
def matrixShearModulusDown(self):
yarnVal = self.ids.yarnSelectSpinner.text
matrixVal = self.ids.matrixSelectSpinner.text
if matrixVal != 'Select Matrix':
GM = float(self.ids.matrixShearModulus.text)
GM_new = GM - 1.0
if GM_new > 0.0:
self.ids.matrixShearModulus.text = str(GM_new)
#cooridnate system transfomation matrix
class CoordinateTransform(BoxLayout):
def CalculateTransform(self):
angle = float(self.ids.braidAngle.text)
angleRad = angle * (math.pi / 180)
        # Standard 2D stress transformation matrix (c = cos, s = sin):
        # T = [[c^2, s^2, 2cs], [s^2, c^2, -2cs], [-cs, cs, c^2 - s^2]]
        T11 = math.cos(angleRad) ** 2
        T12 = math.sin(angleRad) ** 2
        T13 = 2 * math.cos(angleRad) * math.sin(angleRad)
        T21 = T12
        T22 = math.cos(angleRad) ** 2
        T23 = -T13
        T31 = -math.cos(angleRad) * math.sin(angleRad)
        T32 = -T31
        T33 = (math.cos(angleRad) ** 2) - (math.sin(angleRad) ** 2)
T11inv = T11
T12inv = T12
T13inv = -T13
T21inv = T12inv
T22inv = T22
T23inv = T13
T31inv = -T31
T32inv = -T31inv
T33inv = T33
#Display transformation matrix to screen
#format output for 3 digits after decimal place
self.ids.T11.text = '{0:.3f}'.format(T11)
self.ids.T12.text = '{0:.3f}'.format(T12)
self.ids.T13.text = '{0:.3f}'.format(T13)
self.ids.T21.text = '{0:.3f}'.format(T21)
self.ids.T22.text = '{0:.3f}'.format(T22)
self.ids.T23.text = '{0:.3f}'.format(T23)
self.ids.T31.text = '{0:.3f}'.format(T31)
self.ids.T32.text = '{0:.3f}'.format(T32)
self.ids.T33.text = '{0:.3f}'.format(T33)
#Display inverse transformation matrix to screen
self.ids.T11inv.text = '{0:.3f}'.format(T11inv)
self.ids.T12inv.text = '{0:.3f}'.format(T12inv)
self.ids.T13inv.text = '{0:.3f}'.format(T13inv)
self.ids.T21inv.text = '{0:.3f}'.format(T21inv)
self.ids.T22inv.text = '{0:.3f}'.format(T22inv)
self.ids.T23inv.text = '{0:.3f}'.format(T23inv)
self.ids.T31inv.text = '{0:.3f}'.format(T31inv)
self.ids.T32inv.text = '{0:.3f}'.format(T32inv)
self.ids.T33inv.text = '{0:.3f}'.format(T33inv)
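        # Sanity check at 45 degrees: c^2 = s^2 = 0.5 and 2cs = 1.0, so the
        # forward matrix displays as [[0.5, 0.5, 1.0], [0.5, 0.5, -1.0],
        # [-0.5, 0.5, 0.0]].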
def AngleDown(self):
angle = float(self.ids.braidAngle.text)
angleNew = angle - 1.0
self.ids.braidAngle.text = '{0:.1f}'.format(angleNew)
def AngleUp(self):
angle = float(self.ids.braidAngle.text)
angleNew = angle + 1.0
self.ids.braidAngle.text = '{0:.1f}'.format(angleNew)
#Calculation of braid manufacturing parameters using input braid geometry and braid machine kinematics
class BraidManufacturingAbout(FloatLayout):
load = ObjectProperty(None)
cancel = ObjectProperty(None)
class BraidManufacturing(BoxLayout):
def dismiss_popup(self):
self._popup.dismiss()
def AboutManufacturing(self):
content = BraidManufacturingAbout(load=self.load, cancel=self.dismiss_popup)
self._popup = Popup(title="Braid Manufacturing About", content=content, size_hint=(0.9, 0.9))
self._popup.open()
def load(self):
pass
def CalculateManufacture(self):
R = float(self.ids.radius.text)
mandrelVelocity = float(self.ids.mandrelVelocity.text)
rotationalVelocity = float(self.ids.carrierSpeed.text)
Wy = float(self.ids.yarnWidth.text)
gamma = math.radians(float(self.ids.halfConeAngle.text))
N = float(self.ids.carriers.text)
#Calculate braid angle from mandrel/ carrier speed
rho = 2 * math.pi * R * (mandrelVelocity / rotationalVelocity)
angle = math.atan(rho) * (180 / math.pi)
self.ids.braidAngle.text = '{0:.1f}'.format(angle)
#Calculate Braid Jam Angle
numerator = Wy * math.sin(gamma)
denominator = 2 * R * math.sin(2 * math.pi * math.sin(gamma) / N)
thetaJammed = (math.acos(numerator / denominator)) * (180 / math.pi)
self.ids.braidJamAngle.text = '{0:.1f}'.format(thetaJammed)
#Calculate Yarn Undulation and Shift angle
#angle = float(self.ids.braidAngle.text)
beta = 2 * math.pi / N
Angle = math.radians(angle)
Lund = R*beta / math.sin(Angle)
self.ids.yarnUndulation.text = '{0:.3f}'.format(Lund)
self.ids.shiftAngle.text = '{0:.3f}'.format(beta)
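        # Worked example (hypothetical machine settings): R = 10 mm, mandrel
        # velocity 1 and carrier speed 60 give rho = 2*pi*10/60 ~ 1.047 and
        # a braid angle of atan(1.047) ~ 46.3 degrees.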
def RadiusDown(self):
radius = float(self.ids.radius.text)
radiusNew = radius - 0.1
if radiusNew > 0.0:
self.ids.radius.text = '{0:.1f}'.format(radiusNew)
def RadiusUp(self):
radius = float(self.ids.radius.text)
radiusNew = radius + 0.1
self.ids.radius.text = '{0:.1f}'.format(radiusNew)
def yarnWidthDown(self):
yarnWidth = float(self.ids.yarnWidth.text)
yarnWidthNew = yarnWidth - 0.1
if yarnWidthNew > 0.0:
self.ids.yarnWidth.text = '{0:.1f}'.format(yarnWidthNew)
def yarnWidthUp(self):
yarnWidth = float(self.ids.yarnWidth.text)
yarnWidthNew =yarnWidth + 0.1
self.ids.yarnWidth.text = '{0:.1f}'.format(yarnWidthNew)
def CarriersDown(self):
carriers = int(self.ids.carriers.text)
carriersNew = carriers - 1
if carriersNew > 0:
self.ids.carriers.text = '{0:d}'.format(carriersNew)
def CarriersUp(self):
carriers = int(self.ids.carriers.text)
carriersNew =carriers + 1
self.ids.carriers.text = '{0:d}'.format(carriersNew)
def mandrelVelocityDown(self):
mandrelVelocity = float(self.ids.mandrelVelocity.text)
mandrelVelocityNew = mandrelVelocity - 1.0
if mandrelVelocityNew > 0.0:
self.ids.mandrelVelocity.text = '{0:.1f}'.format(mandrelVelocityNew)
def mandrelVelocityUp(self):
mandrelVelocity = float(self.ids.mandrelVelocity.text)
mandrelVelocityNew =mandrelVelocity + 1.0
self.ids.mandrelVelocity.text = '{0:.1f}'.format(mandrelVelocityNew)
def carrierSpeedDown(self):
carrierSpeed = float(self.ids.carrierSpeed.text)
carrierSpeedNew = carrierSpeed - 1.0
if carrierSpeedNew > 0.0:
self.ids.carrierSpeed.text = '{0:.1f}'.format(carrierSpeedNew)
def carrierSpeedUp(self):
carrierSpeed = float(self.ids.carrierSpeed.text)
carrierSpeedNew = carrierSpeed + 1.0
self.ids.carrierSpeed.text = '{0:.1f}'.format(carrierSpeedNew)
def halfConeAngleDown(self):
halfConeAngle = float(self.ids.halfConeAngle.text)
halfConeAngleNew = halfConeAngle - 1.0
if halfConeAngleNew > 0.0:
self.ids.halfConeAngle.text = '{0:.1f}'.format(halfConeAngleNew)
def halfConeAngleUp(self):
halfConeAngle = float(self.ids.halfConeAngle.text)
halfConeAngleNew = halfConeAngle + 1.0
self.ids.halfConeAngle.text = '{0:.1f}'.format(halfConeAngleNew)
class ScreenMenu(Spinner):
pass
class MainBar(BoxLayout):
pass
class MachineSetupAbout(FloatLayout):
load = ObjectProperty(None)
cancel = ObjectProperty(None)
#Visual guide for the setup of a braiding machine to produce different braiding patterns
class MachineSetup(BoxLayout):
def dismiss_popup(self):
self._popup.dismiss()
def AboutMachineSetup(self):
content = MachineSetupAbout(load=self.load, cancel=self.dismiss_popup)
self._popup = Popup(title="Braid Machine Setup About", content=content, size_hint=(0.9, 0.9))
self._popup.open()
def load(self):
pass
fileNames = 'None'
def patternSelect(self):
        global fileNames, startImg
        braidPattern = self.ids.braidPatternSpinner.text
curdir = os.path.dirname(os.path.realpath(__file__))
braidFileType = "*.jpg"
startImg = 0
if braidPattern == "Diamond Full":
DiamondBraid = '\Diamond_FullLoad\BraidMachine_V3_Step01-01.tif'
folder = "Diamond_FullLoad"
pathName = os.path.join(curdir, folder)
names = os.path.join(pathName, braidFileType)
fileNames = sorted(glob.glob(names))
self.ids.patternImage.source = fileNames[startImg]
numImg = len(fileNames)
self.ids.imageProgressBar.max = numImg - 1
self.ids.imageProgressBar.value = startImg
elif braidPattern == "Diamond Half":
braid = "\Diamond_HalfLoad\*.jpg"
folder = "Diamond_HalfLoad"
pathName = os.path.join(curdir, folder)
names = os.path.join(pathName, braidFileType)
fileNames = sorted(glob.glob(names))
self.ids.patternImage.source = fileNames[startImg]
numImg = len(fileNames)
self.ids.imageProgressBar.max = numImg - 1
self.ids.imageProgressBar.value = startImg
elif braidPattern == "Regular Full":
braid = "\RegularFullLoad\*.jpg"
folder = "RegularFullLoad"
pathName = os.path.join(curdir, folder)
names = os.path.join(pathName, braidFileType)
fileNames = sorted(glob.glob(names))
self.ids.patternImage.source = fileNames[startImg]
numImg = len(fileNames)
self.ids.imageProgressBar.max = numImg - 1
self.ids.imageProgressBar.value = startImg
elif braidPattern == "Regular One-Third":
braid = "\RegularThirdLoad\*.jpg"
folder = "RegularThirdLoad"
pathName = os.path.join(curdir, folder)
names = os.path.join(pathName, braidFileType)
fileNames = sorted(glob.glob(names))
self.ids.patternImage.source = fileNames[startImg]
numImg = len(fileNames)
self.ids.imageProgressBar.max = numImg - 1
self.ids.imageProgressBar.value = startImg
elif braidPattern == "Hercules":
braid = "\HerculesHalfLoad\*.jpg"
folder = "HerculesHalfLoad"
pathName = os.path.join(curdir, folder)
names = os.path.join(pathName, braidFileType)
fileNames = sorted(glob.glob(names))
self.ids.patternImage.source = fileNames[startImg]
numImg = len(fileNames)
self.ids.imageProgressBar.max = numImg - 1
self.ids.imageProgressBar.value = startImg
def backButton(self):
braidPatternVal = self.ids.braidPatternSpinner.text
if braidPatternVal != 'Select Braid Pattern':
global fileNames, startImg
if startImg > 0:
startImg = startImg - 1
self.ids.patternImage.source = fileNames[startImg]
self.ids.imageProgressBar.value = startImg
def forwardButton(self):
braidPatternVal = self.ids.braidPatternSpinner.text
if braidPatternVal != 'Select Braid Pattern':
global fileNames, startImg
numImg = len(fileNames)
if startImg < numImg:
self.ids.patternImage.source = fileNames[startImg]
self.ids.imageProgressBar.value = startImg
startImg = startImg + 1
#print startImg
class VolumeFractionAbout(FloatLayout):
load = ObjectProperty(None)
cancel = ObjectProperty(None)
#Volume Fraction and Cover Factor Calculation
class VolumeFraction(BoxLayout):
def dismiss_popup(self):
self._popup.dismiss()
def AboutVolumeFraction(self):
content = VolumeFractionAbout(load=self.load, cancel=self.dismiss_popup)
self._popup = Popup(title="Volume Fraction About", content=content, size_hint=(0.9, 0.9))
self._popup.open()
def load(self):
pass
def CalculateVF(self, data):
braidVal = self.ids.braidType.text
yarnShapeVal = self.ids.braidCrossSection.text
if braidVal != 'Braid Type' and yarnShapeVal != "Select Yarn Shape":
#get input values from user
r0 = float(self.ids.braidRadius.text)
yarnWidth = float(self.ids.yarnWidth.text)
yarnThickness = float(self.ids.yarnThickness.text)
numberYarns = float(self.ids.numberYarns.text)
thetaDeg = float(self.ids.theta.text)
theta = math.radians(thetaDeg)
braidType = self.ids.braidType.text
braidCrossSection = self.ids.braidCrossSection.text
#print braidCrossSection
if yarnThickness > 0 and yarnWidth > 0 and numberYarns > 0 and theta > 0 and r0 > 0:
t = 2* yarnThickness
#calculate yarn cross sectional shape
if braidCrossSection == "Ellipse":
yarnArea = math.pi * yarnWidth * 0.5 * yarnThickness * 0.5
elif braidCrossSection == "Circle":
yarnArea = math.pi * (math.pow(yarnWidth * 0.5, 2))
elif braidCrossSection == "Rectangle":
yarnArea = yarnWidth * yarnThickness
if braidType == "Diamond":
jammed = yarnArea * 4 * numberYarns / (2 * math.pi * r0 * t*math.cos(theta))
if jammed <= 1:
Vf = yarnArea * 4 * numberYarns / (2 * math.pi * r0 * t*math.cos(theta))
elif jammed >=1:
Vf = 1
coverJammed = yarnWidth * numberYarns / (math.pi * r0 * math.cos(theta))
if coverJammed <= 1:
CF = coverJammed
elif coverJammed >1:
CF = 1
self.ids.volumeFraction.text = '{0:.3f}'.format(Vf)
self.ids.coverFactor.text = '{0:.3f}'.format(CF)
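                    # Clamp illustration (hypothetical inputs): Wy = 2, N = 24,
                    # R = 10, theta = 45 deg gives 2*24 / (pi*10*cos(45 deg))
                    # ~ 2.16, which is capped at CF = 1.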
def ShowBraidPattern(self):
braidPattern = self.ids.braidType.text
if braidPattern == "Diamond":
self.ids.braidPatternImage.source = 'DiamondBraid_45deg.jpg'
elif braidPattern == "Regular":
self.ids.braidPatternImage.source = 'RegularBraid_45deg.jpg'
elif braidPattern == "Hercules":
self.ids.braidPatternImage.source = 'HerculesBraid_45deg.jpg'
#This is the angle finder widget that allows drawing two straight lines and calculates the minor angle between the two.
class Angle(Widget):
#This function initializes the widget with a touch count of zero
def __init__(self, **kwargs):
super(Angle, self).__init__(**kwargs)
self.touch_down_count = 0
#This function defines the actions that take place when a touch event occurs
def on_touch_down(self, touch):
#when touch count = 2, the canvas is cleared, getting rid of the lines and angle
if self.touch_down_count == 2:
self.canvas.clear()
return
#when touch count is greater than 2, we reset the count to zero to allow for new lines to be drawn and measured
if self.touch_down_count > 2:
self.touch_down_count = 0
#Record the touch coordinates in x and y as variables
x1 = touch.x
y1 = touch.y
#create a label on touch and store it in the user dictionary to be accessed later by an update function
touch.ud['label'] = Label(size_hint=(None, None))
#when the touch count is 0 or 1, we will record the touch coordinates and draw a crosshair at the touch location
if self.touch_down_count <= 1:
#add a label widget
self.add_widget(touch.ud['label'])
with self.canvas:
#save the touch points to the user dictionary
touch.ud['x1'] = x1
touch.ud['y1'] = y1
#set parameters for crosshair display
Color(1, 0, 0)
l = dp(40)
w = dp(3)
#draw crosshair
Rectangle(pos=(touch.ud['x1'] - w / 2, touch.ud['y1'] - l / 2), size=(w, l))
Rectangle(pos=(touch.ud['x1'] - l / 2, touch.ud['y1'] - w / 2), size=(l, w))
#Function to define what happens on a drag action
def on_touch_move(self, touch):
#Record the touch coordinates to variables
x2 = touch.x
y2 = touch.y
#Save touch coordinates to the user dictionary
touch.ud['x2'] = x2
touch.ud['y2'] = y2
ud = touch.ud
#define a group, g, that will be assigned to the line drawn to allow the line to be redrawn as movements occur, leaving only one line on the screen
ud['group'] = g = str(touch.uid)
self.canvas.remove_group(g)
#When the touch count is zero (first touch), we define a vector v1 based on the touch positions in ud
if self.touch_down_count == 0:
v1 = (touch.ud['x2'] - touch.ud['x1'], touch.ud['y2'] - touch.ud['y1'])
self.v1 = v1
#When the touch count is 1 (second touch), we define a vector v2 based on the touch positions in ud. The angle between vectors v1 and v2 is then calculated.
if self.touch_down_count == 1:
v2 = (touch.ud['x2'] - touch.ud['x1'], touch.ud['y2'] - touch.ud['y1'])
self.v2 = v2
angle = Vector(self.v1).angle(self.v2)
absoluteAngle = abs(angle)
#The following if statement is used to ensure the minor angle is always calculated
if absoluteAngle > 90:
absoluteAngle = 180 - absoluteAngle
#The next two lines are used to update the angle label value as the lines are moved around
touch.ud['angle'] = absoluteAngle
self.update_touch_label(touch.ud['label'], touch)
#If the touch count is greater than 1 (third touch), then this function will end and the canvas will clear as in the previous function
if self.touch_down_count > 1:
return
#This defines the line and crosshair that is drawn between the initial touch point and where the finger has been dragged
with self.canvas:
Color(1, 0, 0)
l = dp(40)
w = dp(3)
Line(points=[touch.ud['x1'], touch.ud['y1'], x2, y2], width=dp(1.5), group=g)
Rectangle(pos=(touch.ud['x2'] - w / 2, touch.ud['y2'] - l / 2), size=(w, l), group=g)
Rectangle(pos=(touch.ud['x2'] - l / 2, touch.ud['y2'] - w / 2), size=(l, w), group=g)
#this function defines what to do when a touch is released. The touch count is simply incremented
def on_touch_up(self, touch):
self.touch_down_count += 1
#This function defines how the angle label is to be updated. It indicates the number of digits to show, the label size and position, color, and font type
def update_touch_label(self, label, touch):
label.text = '%.3f deg' % (touch.ud['angle'])
label.pos = (self.center_x, self.height - dp(60))
label.font_size = '25 dp'
label.size = 1, 1
label.color = 0, 0, 0, 1
label.bold = 1
class BraidedCompositeDesignApp(App):
def build(self):
self.screen = None
self.root = GridLayout(rows = 2, cols = 1)
self.screen_layout = BoxLayout()
self.menu = ScreenMenu()
self.root.add_widget(self.menu)
self.root.add_widget(self.screen_layout)
self.menu.bind(text=self.select_screen)
self.show('Main')
#control window size for screen shots
Window.size= (360,640)
return self.root
def select_screen(self, *args):
self.show(self.menu.text)
def ensure_dir(self,f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
curdir = os.path.dirname(os.path.realpath(__file__))
print 'current dir', curdir
img1 = "Test Images/pic1.jpg"
img2 = "Test Images/pic2.jpg"
img3 = "Test Images/pic3.jpg"
img4 = "Test Images/pic4.jpg"
img5 = "Test Images/pic5.jpg"
name1 = os.path.join(curdir, img1)
name2 = os.path.join(curdir, img2)
name3 = os.path.join(curdir, img3)
name4 = os.path.join(curdir, img4)
name5 = os.path.join(curdir, img5)
shutil.copy(name1, d)
shutil.copy(name2, d)
shutil.copy(name3, d)
shutil.copy(name4, d)
shutil.copy(name5, d)
def on_pause(self):
return True
def on_resume(self):
pass
def show(self, name='Main'):
if self.screen is not None:
self.screen_layout.remove_widget(self.screen)
self.screen = None
if name == 'Main':
screen = MainScreen()
elif name == 'Micromechanics':
screen = MicroMechanics()
elif name == 'Lamina Strength':
screen = LaminaStrength()
elif name == 'CS Transform':
screen = CoordinateTransform()
elif name == 'Braid Manufacturing':
screen = BraidManufacturing()
#screen = braidManufacture()
elif name == 'Volume Fraction':
screen = VolumeFraction()
elif name == 'Angle':
#check to see if directory is available, if not create new directory and load test images
#into this directory
filename = "/sdcard/Pictures/BraidedCompositeApp/TestImages/"
self.ensure_dir(filename)
screen = AngleLayout()
elif name == 'Braid Machine Setup':
screen = MachineSetup()
elif name == 'About':
screen = About_Screen()
else:
raise Exception('Invalid screen name')
self.screen = screen
self.screen_layout.add_widget(screen)
if __name__ == "__main__":
BraidedCompositeDesignApp().run()
| mit | 7,503,996,245,676,990,000 | 37.383667 | 164 | 0.571615 | false | 3.559731 | false | false | false |
rpetrenko/test-reporter | server/api/common.py | 1 | 1323 | # This source code is licensed under the Apache license found in the
# LICENSE file in the root directory of this project.
import logging
from flask_restplus import Api
from server import settings
from bson import json_util
import json
log = logging.getLogger(__name__)
api = Api(version='1.0',
title='Test Reporter API',
description='API for test reporter')
@api.errorhandler
def default_error_handler(e):
# message = 'An unhandled exception occurred.'
log.exception(e)
if not settings.FLASK_DEBUG:
return {'message': str(e)}, 500
def db_response_to_json(x):
json_str = json.dumps(x, default=json_util.default)
return json.loads(json_str)
def jenkins_response_to_json(x):
return json.loads(x)
def insert_creds_to_jenkins_url(username, api_key, uri):
parts = uri.split("://")
assert len(parts) == 2
if api_key:
insert_part = "{}:{}@".format(username, api_key)
elif username:
insert_part = "{}@".format(username)
else:
insert_part = ""
uri = "{}://{}{}".format(parts[0], insert_part, parts[1])
return uri
def create_jenkins_uri(username, api_key, uri):
uri = insert_creds_to_jenkins_url(username, api_key, uri)
if not uri.endswith('/'):
uri = uri + '/'
return "{}api/json".format(uri)
| apache-2.0 | -776,886,979,894,500,600 | 23.962264 | 68 | 0.643991 | false | 3.472441 | false | false | false |
anisotropi4/goldfinch | bin/dump-tree.py | 1 | 1613 | #!/usr/bin/python3
import xml.etree.cElementTree as ET
import sys
import argparse
parser = argparse.ArgumentParser(description='Strip namespace and a list of xml-tags in a tsv format')
parser.add_argument('--path', dest='path', type=str, default='',
help='output directory file')
parser.add_argument('inputfile', type=str, nargs='?', help='name of xml-file to parse')
parser.add_argument('outputfile', type=str, nargs='?', help='name of output file')
args = parser.parse_args()
path = args.path
if path != '':
path = path + '/'
fin = sys.stdin
if args.inputfile:
fin = open(args.inputfile, 'r')
fout = sys.stdout
if args.outputfile:
fout = open(path + args.outputfile, 'w')
def strip_ns(tag, namespaces):
for nk, nv in namespaces.items():
if tag.startswith(nk+':'):
return tag[len(nk)+1:]
if tag.startswith('{'+nv+'}'):
return tag[len(nv)+2:]
return tag
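# e.g. strip_ns('{http://example.org/ns}item', {'ex': 'http://example.org/ns'})
# returns 'item' -- the URI here is a hypothetical placeholder.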
namespaces = {}
document = ET.iterparse(fin, events=('start', 'end', 'start-ns', 'end-ns'))
s = []
n = 0
ns_stack = []
for event, e in document:
    if event == 'start-ns':
        (nk, nv) = e
        namespaces[nk] = nv
        ns_stack.append(nk)
        continue
    if event == 'end-ns':
        # an 'end-ns' event carries no prefix, so pop the most recently
        # opened mapping rather than the literal string 'key'
        if ns_stack:
            namespaces.pop(ns_stack.pop(), None)
        continue
if event == 'start':
tag = strip_ns(e.tag, namespaces)
s.append(tag)
n = n + 1
r = "\t".join(s + [str(n)])
fout.write(r)
fout.write('\n')
e.clear()
if event == 'end':
s.pop()
n = n - 1
if fout is not sys.stdout:
fout.close()
| mit | 1,958,677,056,999,847,400 | 22.376812 | 102 | 0.555487 | false | 3.374477 | false | false | false |
pydio/pydio-sync | src/pydio/res/i18n/html_strings.py | 1 | 9431 | def _(a_string): return a_string
var_1=_('How can I find my server URL?')
var_2=_('The server URL is the address that you can see in your browser when accessing Pydio via the web.')
var_3=_('It starts with http or https depending on your server configuration.')
var_4=_('If you are logged in Pydio and you see the last part of the URL starting with "ws-", remove this part and only keep the beginning (see image below).')
var_5=_('Got it!')
var_6=_('Connecting ...')
var_7=_('Configure Connection')
var_8=_('Error while trying to connect to %1 :')
var_9=_('%1')
var_10=_('Connect to the server with the same URL as the one you would use to access through a web browser, e.g. http://mydomain.com/pydio')
var_11=_('Required')
var_12=_('Required')
var_13=_('Required')
var_14=_('Tips')
var_15=_('where can I find the server URL?')
var_16=_('Connect')
var_17=_('Trust SSL certificate')
var_18=_('1/3 Select a workspace')
var_19=_('Welcome %1!')
var_20=_('You are connecting to %1')
var_21=_('change')
var_22=_('change')
var_23=_('Remote workspace')
var_24=_('This workspace is read only!')
var_25=_('Synchronise only a subfolder of this workspace')
var_26=_('loading')
var_27=_('Whole workspace')
var_28=_('loading')
var_29=_('Workspace')
var_30=_('Folder')
var_31=_('Change')
var_32=_('Next')
var_33=_('Advanced Parameters')
var_34=_('Save changes')
var_35=_('2/3 Select a destination')
var_36=_('By default a local folder will be created on your computer')
var_37=_('Change')
var_38=_('3/3 Optional Parameters')
var_39=_('Server')
var_40=_('Workspace')
var_41=_('Folder')
var_42=_('Whole workspace')
var_43=_('change')
var_44=_('Local folder')
var_45=_('change')
var_46=_('Name this synchro')
var_47=_('Advanced Parameters')
var_48=_('Previous')
var_49=_('Next')
var_50=_('Previous')
var_51=_('Next')
var_52=_('SYNC NAME')
var_53=_('DATA SIZE')
var_54=_('ESTIMATED TIME')
var_55=_('Ready for ignition!')
var_56=_('Are you ready to launch the synchronization?')
var_57=_('Your data will be in orbit in no time!')
var_58=_('A sync task with similar parameters exists.')
var_59=_('Please')
var_60=_('change parameters')
var_61=_('A sync task with similar parameters exists.')
var_62=_('You may want to')
var_63=_('change parameters')
var_64=_('FIRE THE ROCKET!')
var_65=_('change parameters')
var_66=_('Synchronization running...')
var_67=_('Liftoff! First sync can take some time...')
var_68=_('CREATE NEW SYNCHRO')
var_69=_('DONE')
var_70=_('Sync Direction')
var_71=_('Modifications are sent to the server but the client does not download anything.')
var_72=_('Modifications from both sides (local/remote) are automatically reflected on the other side.')
var_73=_('Modifications from the server are downloaded but nothing is sent to the server.')
var_74=_('Upload Only')
var_75=_('computer to server')
var_76=_('Bi-directional')
var_77=_('default when possible')
var_78=_('Download Only')
var_79=_('server to computer')
var_80=_('Sync Frequency')
var_81=_('By default, the application will synchronize automatically')
var_82=_('Automatically')
var_83=_('Manually')
var_84=_('Given Time')
var_85=_('Run the sync every day at')
var_86=_('Conflicts')
var_87=_('When files are modified on both the server and your computer at the same time, a conflict is detected.')
var_88=_('Automatic')
var_89=_('Solve conflicts manually')
var_90=_('With the default keep-both behavior conflicting files will be copied on your local sync. Which version is to be kept?')
var_91=_('Keep both')
var_92=_('Prefer local')
var_93=_('Prefer remote')
var_94=_('Connection settings')
var_95=_('Increase the timeout in case of slow responsive server')
var_96=_('Timeout in seconds')
var_97=_('You can increase or reduce the number of concurrent connections. More means a faster sync but requires a server with more resources. (Default: 4)')
var_98=_('Concurrent connections')
var_99=_('Include/Exclude from Sync')
var_100=_('Synchronise, use a list of files or patterns to include in the sync')
var_101=_('Do not synchronise, list of files or patterns to exclude from sync')
var_102=_('SYNC %1 parameters')
var_103=_('Server')
var_104=_('Workspace')
var_105=_('Folder')
var_106=_('Resync task')
var_107=_('This operation will make sure that your server and local folder are correctly synchronized. Beware, this can take a while, and may be resource intensive.')
var_108=_('Cancel')
var_109=_('Proceed')
var_110=_('Trigger a full re-indexation')
var_111=_('Label')
var_112=_('Server Connexion')
var_113=_('Login')
var_114=_('Password')
var_115=_('Local Folder')
var_116=_('Choose')
var_117=_('Remote workspace')
var_118=_('Workspace')
var_119=_('Folder')
var_120=_('Change')
var_121=_('This workspace is read only!')
var_122=_('Synchronise only a subfolder of this workspace')
var_123=_('Whole workspace')
var_124=_('[loading...]')
var_125=_('Advanced parameters')
var_126=_('Delete Sync')
var_127=_('Save Parameters')
var_128=_('Help us! ')
var_129=_('Give us your feedback to improve PydioSync.')
var_130=_('Please wait...')
var_131=_('PydioSync Feedback Form')
var_132=_('You have the power to help us improve PydioSync by submitting anonymously this simple form.')
var_133=_('Include the number of synced files;')
var_134=_('Include the last sequence number;')
var_135=_('Include server info;')
var_136=_('Include errors;')
var_137=_('Include the number of errors;')
var_138=_('Comments (Appreciated)')
var_139=_('About')
var_140=_('General configurations page')
var_141=_('Update settings')
var_142=_('Enable / Disable update here.')
var_143=_('Set the update check frequency (here 1 means update check only once a day, default value 0 means it checks for updates each time the agent establishes a new connection with UI)')
var_144=_('Update check frequency in days')
var_145=_('Date on which last update check happened')
var_146=_('Last update check was on: ')
var_147=_('Proxy settings')
var_148=_('Enable / Disable Proxy.')
var_149=_('If you want the network connections to pass through proxy, fill the parameters below.')
var_150=_('Log settings')
var_151=_('You can change the name of log file here.')
var_152=_('File Name')
var_153=_('Limit the number of log files to be stored locally.')
var_154=_('Number of log files')
var_155=_('Set restrictions on log file size here.')
var_156=_('Enhance the log details as you need them.')
var_157=_('Info')
var_158=_('Debug')
var_159=_('Warning')
var_160=_('Other settings')
var_161=_('Max wait time for local db access')
var_162=_('If you encounter database locked error try increasing this value')
var_163=_('Timeout in seconds')
var_164=_('Set Language')
var_165=_('Language ')
var_166=_('Update Settings')
var_167=_('Ooops, cannot contact agent! Make sure it is running correctly, process will try to reconnect in 20s')
var_168=_('Select a workspace')
var_169=_('Full path to the local folder')
var_170=_('Are you sure you want to delete this synchro? No data will be deleted')
var_171=_('computing...')
var_172=_('Status')
var_173=_('syncing')
var_174=_('Size')
var_175=_('Estimated time')
var_176=_('Status')
var_177=_('syncing')
var_178=_('Status')
var_179=_('syncing')
var_180=_('Last sync')
var_181=_('ERROR')
var_182=_('Status')
var_183=_('idle')
var_184=_('[DISABLED]')
var_185=_('Conflicts')
var_186=_('Solve Conflict')
var_187=_('Solved')
var_188=_('Last files synced')
var_189=_('Open File')
var_190=_('Transfers in progress')
var_191=_('An element named %1 was modified on both the server and on your local computer. Select how you want to solve this conflicting case:')
var_192=_('Apply to all conflicts')
var_193=_('Mine')
var_194=_('Both Versions')
var_195=_('Theirs')
var_196=_('Create a new synchronization')
var_197=_('Create a new synchronization')
var_198=_('Share %1 via Pydio')
var_199=_('Share %1 via Pydio')
var_200=_('Description')
var_201=_('Path')
var_202=_('Share item')
var_203=_('by creating a public link that can easily be sent to your contacts.')
var_204=_('You can customize the link parameters using the forms below.')
var_205=_('Secure Link Access')
var_206=_('Optional Password')
var_207=_('Password')
var_208=_('Expires After')
var_209=_('Days')
var_210=_('Downloads')
var_211=_('Security')
var_212=_('Password')
var_213=_('Expires after')
var_214=_('Days')
var_215=_('Downloads')
var_216=_('Advanced parameters')
var_217=_('Link Handle')
var_218=_('If you want a durable and pretty link (like https://.../my-share-link), you can use this field. Link handle will be generated if left empty.')
var_219=_('Description')
var_220=_('This will be displayed to the shared users.')
var_221=_('Access Rights')
var_222=_('By default, the item will be previewed and downloadable')
var_223=_('Preview')
var_224=_('Download')
var_225=_('Upload')
var_226=_('Generate Link')
var_227=_('Generate Link')
var_228=_('Share %1 via Pydio')
var_229=_('Shared Link')
var_230=_('Shared link to the selected item already exists. Below is the link to the selected item')
var_231=_('New shared link to the selected item has been generated')
var_232=_('Shared Link to the selected item has not been generated')
var_233=_('Copy to Clipboard')
var_234=_('UnShare Link')
var_235=_('Text has been copied to clipboard.')
var_236=_('Successfully unshared.')
var_237=_('Please wait ...')
var_238=_('Welcome to the Pydio Sync')
var_239=_('The easiest way to keep your data in control')
var_240=_('Loading...')
var_241=_('Get Started')
var_242=_('Required')
var_243=_('Proxy settings')
var_244=_('Get Started')
| gpl-3.0 | -2,384,637,885,232,840,700 | 37.493878 | 184 | 0.693564 | false | 3.507252 | false | false | false |
Neurita/boyle | boyle/files/utils.py | 1 | 1412 | # coding=utf-8
"""
Utilities for file management.
"""
# ------------------------------------------------------------------------------
# Author: Alexandre Manhaes Savio <[email protected]>
#
# 2015, Alexandre Manhaes Savio
# Use this at your own risk!
# ------------------------------------------------------------------------------
import os.path as op
import shutil
from .names import remove_ext, get_extension
def copy_w_ext(srcfile, destdir, basename):
""" Copy `srcfile` in `destdir` with name `basename + get_extension(srcfile)`.
Add pluses to the destination path basename if a file with the same name already
exists in `destdir`.
Parameters
----------
srcfile: str
destdir: str
basename:str
Returns
-------
dstpath: str
"""
ext = get_extension(op.basename(srcfile))
dstpath = op.join(destdir, basename + ext)
return copy_w_plus(srcfile, dstpath)
def copy_w_plus(src, dst):
"""Copy file from `src` path to `dst` path. If `dst` already exists, will add '+' characters
to the end of the basename without extension.
Parameters
----------
src: str
dst: str
Returns
-------
dstpath: str
"""
dst_ext = get_extension(dst)
dst_pre = remove_ext (dst)
while op.exists(dst_pre + dst_ext):
dst_pre += '+'
shutil.copy(src, dst_pre + dst_ext)
return dst_pre + dst_ext
| bsd-3-clause | 4,066,195,155,547,318,300 | 20.723077 | 96 | 0.550992 | false | 3.911357 | false | false | false |
hguemar/cinder | cinder/api/contrib/admin_actions.py | 1 | 11497 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.utils import strutils
import webob
from webob import exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import backup
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder import rpc
from cinder import volume
LOG = logging.getLogger(__name__)
class AdminController(wsgi.Controller):
"""Abstract base class for AdminControllers."""
collection = None # api collection to extend
# FIXME(clayg): this will be hard to keep up-to-date
# Concrete classes can expand or over-ride
valid_status = set(['creating',
'available',
'deleting',
'error',
'error_deleting', ])
def __init__(self, *args, **kwargs):
super(AdminController, self).__init__(*args, **kwargs)
# singular name of the resource
self.resource_name = self.collection.rstrip('s')
self.volume_api = volume.API()
self.backup_api = backup.API()
def _update(self, *args, **kwargs):
raise NotImplementedError()
def _get(self, *args, **kwargs):
raise NotImplementedError()
def _delete(self, *args, **kwargs):
raise NotImplementedError()
def validate_update(self, body):
update = {}
try:
update['status'] = body['status'].lower()
except (TypeError, KeyError):
raise exc.HTTPBadRequest(explanation=_("Must specify 'status'"))
if update['status'] not in self.valid_status:
raise exc.HTTPBadRequest(
explanation=_("Must specify a valid status"))
return update
def authorize(self, context, action_name):
# e.g. "snapshot_admin_actions:reset_status"
action = '%s_admin_actions:%s' % (self.resource_name, action_name)
extensions.extension_authorizer('volume', action)(context)
@wsgi.action('os-reset_status')
def _reset_status(self, req, id, body):
"""Reset status on the resource."""
context = req.environ['cinder.context']
self.authorize(context, 'reset_status')
update = self.validate_update(body['os-reset_status'])
msg = _("Updating %(resource)s '%(id)s' with '%(update)r'")
LOG.debug(msg, {'resource': self.resource_name, 'id': id,
'update': update})
notifier_info = dict(id=id, update=update)
notifier = rpc.get_notifier('volumeStatusUpdate')
notifier.info(context, self.collection + '.reset_status.start',
notifier_info)
try:
self._update(context, id, update)
except exception.NotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
notifier.info(context, self.collection + '.reset_status.end',
notifier_info)
return webob.Response(status_int=202)
@wsgi.action('os-force_delete')
def _force_delete(self, req, id, body):
"""Delete a resource, bypassing the check that it must be available."""
context = req.environ['cinder.context']
self.authorize(context, 'force_delete')
try:
resource = self._get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
self._delete(context, resource, force=True)
return webob.Response(status_int=202)
class VolumeAdminController(AdminController):
"""AdminController for Volumes."""
collection = 'volumes'
# FIXME(jdg): We're appending additional valid status
# entries to the set we declare in the parent class
# this doesn't make a ton of sense, we should probably
# look at the structure of this whole process again
# Perhaps we don't even want any definitions in the abstract
# parent class?
valid_status = AdminController.valid_status.union(
set(['attaching', 'in-use', 'detaching']))
valid_attach_status = set(['detached', 'attached', ])
valid_migration_status = set(['migrating', 'error',
'completing', 'none',
'starting', ])
def _update(self, *args, **kwargs):
db.volume_update(*args, **kwargs)
def _get(self, *args, **kwargs):
return self.volume_api.get(*args, **kwargs)
def _delete(self, *args, **kwargs):
return self.volume_api.delete(*args, **kwargs)
def validate_update(self, body):
update = {}
status = body.get('status', None)
attach_status = body.get('attach_status', None)
migration_status = body.get('migration_status', None)
valid = False
if status:
valid = True
update = super(VolumeAdminController, self).validate_update(body)
if attach_status:
valid = True
update['attach_status'] = attach_status.lower()
if update['attach_status'] not in self.valid_attach_status:
raise exc.HTTPBadRequest(
explanation=_("Must specify a valid attach status"))
if migration_status:
valid = True
update['migration_status'] = migration_status.lower()
if update['migration_status'] not in self.valid_migration_status:
raise exc.HTTPBadRequest(
explanation=_("Must specify a valid migration status"))
if update['migration_status'] == 'none':
update['migration_status'] = None
if not valid:
raise exc.HTTPBadRequest(
explanation=_("Must specify 'status', 'attach_status' "
"or 'migration_status' for update."))
return update
@wsgi.action('os-force_detach')
def _force_detach(self, req, id, body):
"""Roll back a bad detach after the volume been disconnected."""
context = req.environ['cinder.context']
self.authorize(context, 'force_detach')
try:
volume = self._get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
self.volume_api.terminate_connection(context, volume,
{}, force=True)
self.volume_api.detach(context, volume)
return webob.Response(status_int=202)
@wsgi.action('os-migrate_volume')
def _migrate_volume(self, req, id, body):
"""Migrate a volume to the specified host."""
context = req.environ['cinder.context']
self.authorize(context, 'migrate_volume')
try:
volume = self._get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
params = body['os-migrate_volume']
try:
host = params['host']
except KeyError:
raise exc.HTTPBadRequest(explanation=_("Must specify 'host'"))
force_host_copy = params.get('force_host_copy', False)
if isinstance(force_host_copy, basestring):
try:
force_host_copy = strutils.bool_from_string(force_host_copy,
strict=True)
except ValueError:
raise exc.HTTPBadRequest(
explanation=_("Bad value for 'force_host_copy'"))
elif not isinstance(force_host_copy, bool):
raise exc.HTTPBadRequest(
explanation=_("'force_host_copy' not string or bool"))
self.volume_api.migrate_volume(context, volume, host, force_host_copy)
return webob.Response(status_int=202)
@wsgi.action('os-migrate_volume_completion')
def _migrate_volume_completion(self, req, id, body):
"""Complete an in-progress migration."""
context = req.environ['cinder.context']
self.authorize(context, 'migrate_volume_completion')
try:
volume = self._get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
params = body['os-migrate_volume_completion']
try:
new_volume_id = params['new_volume']
except KeyError:
raise exc.HTTPBadRequest(
explanation=_("Must specify 'new_volume'"))
try:
new_volume = self._get(context, new_volume_id)
except exception.NotFound:
raise exc.HTTPNotFound()
error = params.get('error', False)
ret = self.volume_api.migrate_volume_completion(context, volume,
new_volume, error)
return {'save_volume_id': ret}
class SnapshotAdminController(AdminController):
"""AdminController for Snapshots."""
collection = 'snapshots'
def _update(self, *args, **kwargs):
db.snapshot_update(*args, **kwargs)
def _get(self, *args, **kwargs):
return self.volume_api.get_snapshot(*args, **kwargs)
def _delete(self, *args, **kwargs):
return self.volume_api.delete_snapshot(*args, **kwargs)
class BackupAdminController(AdminController):
"""AdminController for Backups."""
collection = 'backups'
valid_status = set(['available',
'error'
])
@wsgi.action('os-reset_status')
def _reset_status(self, req, id, body):
"""Reset status on the resource."""
context = req.environ['cinder.context']
self.authorize(context, 'reset_status')
update = self.validate_update(body['os-reset_status'])
msg = "Updating %(resource)s '%(id)s' with '%(update)r'"
LOG.debug(msg, {'resource': self.resource_name, 'id': id,
'update': update})
notifier_info = {'id': id, 'update': update}
notifier = rpc.get_notifier('backupStatusUpdate')
notifier.info(context, self.collection + '.reset_status.start',
notifier_info)
try:
self.backup_api.reset_status(context=context, backup_id=id,
status=update['status'])
except exception.NotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
return webob.Response(status_int=202)
class Admin_actions(extensions.ExtensionDescriptor):
"""Enable admin actions."""
name = "AdminActions"
alias = "os-admin-actions"
namespace = "http://docs.openstack.org/volume/ext/admin-actions/api/v1.1"
updated = "2012-08-25T00:00:00+00:00"
def get_controller_extensions(self):
exts = []
for class_ in (VolumeAdminController, SnapshotAdminController,
BackupAdminController):
controller = class_()
extension = extensions.ControllerExtension(
self, class_.collection, controller)
exts.append(extension)
return exts
| apache-2.0 | -87,235,681,852,157,700 | 36.449511 | 79 | 0.596677 | false | 4.368161 | false | false | false |
litedesk/litedesk-webserver-provision | src/provisioning/models.py | 1 | 20144 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import datetime
from urlparse import urlparse
from autoslug import AutoSlugField
from django.conf import settings
from django.core.mail import send_mail
from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.template.loader import render_to_string
from litedesk.lib import airwatch
from model_utils import Choices
from model_utils.managers import InheritanceManager
from model_utils.models import TimeStampedModel, TimeFramedModel, StatusModel
from qrcode.image.pure import PymagingImage
import qrcode
from audit.models import Trackable
from contrib.models import PropertyTable
from tenants.models import Tenant, TenantService, User
from signals import item_provisioned, item_deprovisioned
import okta
log = logging.getLogger(__name__)
class Provisionable(object):
def activate(self, user, **kw):
raise NotImplementedError
def deprovision(self, service, user, *args, **kw):
raise NotImplementedError
def provision(self, service, user, *args, **kw):
raise NotImplementedError
class UserProvisionable(TimeStampedModel):
user = models.ForeignKey(User)
service = models.ForeignKey(TenantService)
item_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
item = GenericForeignKey('item_type', 'object_id')
@property
def tenant(self):
return self.user.tenant
def __unicode__(self):
return '%s provision for user %s on %s' % (
self.item, self.user, self.service)
class Meta:
unique_together = ('user', 'service', 'item_type', 'object_id')
class UserProvisionHistory(Trackable, TimeFramedModel):
user = models.ForeignKey(User)
service = models.ForeignKey(TenantService)
item_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
item = GenericForeignKey('item_type', 'object_id')
@staticmethod
def on_provision(*args, **kw):
user = kw.get('user')
provisioned_item = kw.get('instance')
item_type = ContentType.objects.get_for_model(provisioned_item)
entry = UserProvisionHistory(
user=user,
service=kw.get('service'),
item_type=item_type,
object_id=provisioned_item.id,
start=datetime.datetime.now()
)
entry.save(editor=kw.get('editor'))
@staticmethod
def on_deprovision(*args, **kw):
user = kw.get('user')
provisioned_item = kw.get('instance')
item_type = ContentType.objects.get_for_model(provisioned_item)
for entry in user.userprovisionhistory_set.filter(
item_type=item_type,
object_id=provisioned_item.id,
service=kw.get('service'),
end__isnull=True
):
entry.end = datetime.datetime.now()
entry.save(editor=kw.get('editor'))
class Asset(TimeStampedModel, Provisionable):
objects = InheritanceManager()
name = models.CharField(max_length=1000)
slug = AutoSlugField(populate_from='name', unique=False, default='')
description = models.TextField(null=True, blank=True)
web = models.BooleanField(default=True)
mobile = models.BooleanField(default=False)
desktop = models.BooleanField(default=False)
@property
def __subclassed__(self):
return Asset.objects.get_subclass(id=self.id)
@property
def supported_platforms(self):
return [p for p in ['web', 'mobile', 'desktop'] if getattr(self, p)]
def provision(self, service, user, editor=None):
if self.can_be_managed_by(service):
UserProvisionable.objects.create(
service=service,
user=user,
item_type=ContentType.objects.get_for_model(self),
object_id=self.id
)
item_provisioned.send(
sender=self.__class__,
editor=editor,
instance=self,
service=service,
user=user
)
def deprovision(self, service, user, editor=None):
UserProvisionable.objects.filter(
service=service,
user=user,
item_type=ContentType.objects.get_for_model(self),
object_id=self.id
).delete()
item_deprovisioned.send(
sender=self.__class__,
editor=editor,
instance=self,
service=service,
user=user
)
def can_be_managed_by(self, service):
return service.type in self.supported_platforms
def __unicode__(self):
return self.name
class Software(Asset):
EXPENSE_CATEGORY = 'software'
def provision(self, service, user, editor=None):
service.assign(self, user)
super(Software, self).provision(service, user, editor=editor)
def deprovision(self, service, user, editor=None):
service.unassign(self, user)
super(Software, self).deprovision(service, user, editor=editor)
class Device(Asset):
EXPENSE_CATEGORY = 'devices'
image = models.ImageField(null=True, blank=True)
@property
def __subclassed__(self):
if 'chrome' in self.name.lower():
self.__class__ = ChromeDevice
return self
def _get_email_template_parameters(self, service, user):
device = self.__subclassed__
if isinstance(device, ChromeDevice):
return {
'user': user,
'service': service,
'site': settings.SITE,
'device': device,
'title': '%s - Welcome to Google' % settings.SITE.get('name'),
'include_additional_information_message': 'true'
}
return None
def _get_email_template(self, service, format='html'):
extension = {
'text': 'txt',
'html': 'html'
}.get(format, format)
template_name = None
if isinstance(self.__subclassed__, ChromeDevice):
template_name = 'activation_chromebook'
return template_name and 'provisioning/mail/%s/%s.tmpl.%s' % (
format, template_name, extension
)
def provision(self, service, user, editor=None):
super(Device, self).provision(service, user, editor=editor)
html_template = self._get_email_template(service, format='html')
text_template = self._get_email_template(service, format='text')
if not (html_template or text_template):
return
template_parameters = self._get_email_template_parameters(service, user)
text_msg = render_to_string(text_template, template_parameters)
html_msg = render_to_string(html_template, template_parameters)
send_mail(
template_parameters['title'],
text_msg,
settings.DEFAULT_FROM_EMAIL,
[user.email],
html_message=html_msg
)
def activate(self, user, *args, **kw):
pass
class MobileDataPlan(Asset):
pass
class ChromeDevice(Device):
def can_be_managed_by(self, service):
return service.type == TenantService.PLATFORM_TYPE_CHOICES.web
class Meta:
proxy = True
class TenantAsset(PropertyTable):
tenant = models.ForeignKey(Tenant)
asset = models.ForeignKey(Asset)
class Meta:
unique_together = ('tenant', 'asset')
class InventoryEntry(Trackable, StatusModel):
STATUS = Choices('handed_out', 'returned')
user = models.ForeignKey(User)
tenant_asset = models.ForeignKey(TenantAsset)
serial_number = models.CharField(max_length=100, null=False, default='N/A')
@property
def tenant(self):
return self.user.tenant
def save(self, *args, **kwargs):
super(InventoryEntry, self).save(
editor=self.user.tenant.primary_contact, *args, **kwargs)
# TODO : if the inventory item is a google device make a call to the google api to
# save the username in the annotated user field
def __unicode__(self):
return '%s (%s)' % (self.user.username, self.serial_number)
class Okta(TenantService, Provisionable):
PLATFORM_TYPE = TenantService.PLATFORM_TYPE_CHOICES.web
ACTIVE_DIRECTORY_CONTROLLER = True
DEACTIVATION_EXCEPTION = okta.UserNotActiveError
domain = models.CharField(max_length=200)
@property
def portal_url(self):
return 'https://%s.okta.com' % self.domain
@property
def portal_help_url(self):
return '%s/help/login' % self.portal_url
def get_client(self):
return okta.Client(self.domain, self.api_token)
def get_service_user(self, user):
client = self.get_client()
return client.get(okta.User, user.tenant_email)
def get_users(self):
client = self.get_client()
return client.get_users()
def register(self, user):
client = self.get_client()
try:
client.add_user(user, activate=False)
except okta.UserAlreadyExistsError:
pass
return self.get_service_user(user)
def activate(self, user, editor=None):
client = self.get_client()
try:
service_user = self.get_service_user(user)
except okta.ResourceDoesNotExistError:
service_user = self.register(user)
status_before = getattr(service_user, 'status', 'STAGED')
activation_url = None
try:
activation_response = client.activate_user(service_user,
send_email=False)
except okta.UserAlreadyActivatedError:
pass
else:
if status_before == 'STAGED':
activation_url = activation_response.get('activationUrl')
password = user.get_remote().set_one_time_password()
template_parameters = {
'user': user,
'service': self,
'site': settings.SITE,
'activation_url': activation_url,
'password': password
}
text_msg = render_to_string(
'provisioning/mail/text/activation_okta.tmpl.txt',
template_parameters
)
html_msg = render_to_string(
'provisioning/mail/html/activation_okta.tmpl.html',
template_parameters
)
send_mail(
'%s - Welcome to %s' % (settings.SITE.get('name'), self.name),
text_msg,
settings.DEFAULT_FROM_EMAIL,
[user.email],
html_message=html_msg
)
super(Okta, self).activate(user, editor)
def assign(self, asset, user):
log.debug('Assigning %s to %s on Okta' % (asset, user))
metadata, _ = self.tenantserviceasset_set.get_or_create(asset=asset)
client = self.get_client()
service_user = self.get_service_user(user)
service_application = client.get(okta.Application,
metadata.get('application_id'))
try:
service_application.assign(service_user,
profile=metadata.get('profile'))
except Exception, why:
log.warn('Error when assigning %s to %s: %s' % (asset, user, why))
def unassign(self, asset, user):
log.debug('Removing %s from %s on Okta' % (asset, user))
metadata, _ = self.tenantserviceasset_set.get_or_create(asset=asset)
client = self.get_client()
service_user = self.get_service_user(user)
service_application = client.get(okta.Application,
metadata.get('application_id'))
try:
service_application.unassign(service_user)
except okta.UserApplicationNotFound, e:
log.info('Failed to unassign %s from %s: %s' % (asset, user, e))
except Exception, why:
log.warn('Error when unassigning %s to %s: %s' % (asset, user, why))
@classmethod
def get_serializer_data(cls, **data):
return {
'domain': data.get('domain')
}
class Meta:
verbose_name = 'Okta'
class AirWatch(TenantService, Provisionable):
PLATFORM_TYPE = 'mobile'
QRCODE_ROOT_DIR = os.path.join(settings.MEDIA_ROOT, 'airwatch_qrcodes')
QRCODE_ROOT_URL = settings.SITE.get(
'host_url') + settings.MEDIA_URL + 'airwatch_qrcodes/'
QRCODE_TEMPLATE = 'https://awagent.com?serverurl={0}&gid={1}'
DEACTIVATION_EXCEPTION = airwatch.user.UserNotActiveError
username = models.CharField(max_length=80)
password = models.CharField(max_length=1000)
server_url = models.URLField()
group_id = models.CharField(max_length=80)
@property
def portal_domain(self):
portal_domain = urlparse(self.server_url).netloc
if portal_domain.startswith('as'):
portal_domain = portal_domain.replace('as', 'ds', 1)
return portal_domain
def get_client(self):
return airwatch.client.Client(
self.server_url, self.username, self.password, self.api_token
)
def get_service_user(self, user):
client = self.get_client()
service_user = airwatch.user.User.get_remote(client, user.username)
if service_user is None:
service_user = airwatch.user.User.create(client, user.username)
return service_user
def get_usergroup(self, group_name):
client = self.get_client()
return airwatch.group.UserGroupHacked.get_remote(client, group_name)
def get_smartgroup(self, smartgroup_id):
client = self.get_client()
return airwatch.group.SmartGroup.get_remote(client, smartgroup_id)
def register(self, user):
client = self.get_client()
try:
return airwatch.user.User.create(client, user.username)
except airwatch.user.UserAlreadyRegisteredError:
return self.get_service_user(user)
@property
def qrcode(self):
server_domain = self.portal_domain
image_dir = os.path.join(self.QRCODE_ROOT_DIR, server_domain)
image_file_name = '{0}.png'.format(self.group_id)
image_file_path = os.path.join(image_dir, image_file_name)
if not os.path.exists(image_file_path):
if not os.path.exists(image_dir):
os.makedirs(image_dir)
data = self.QRCODE_TEMPLATE.format(server_domain, self.group_id)
image = qrcode.make(data, image_factory=PymagingImage, box_size=5)
with open(image_file_path, 'w') as image_file:
image.save(image_file)
image_url = self.QRCODE_ROOT_URL + server_domain + '/' + image_file_name
return image_url
def activate(self, user, editor=None):
service_user = self.get_service_user(user)
if service_user is None:
service_user = self.register(user)
try:
title = '%s - Welcome to AirWatch' % settings.SITE.get('name')
service_user.activate()
template_parameters = {
'user': user,
'service': self,
'site': settings.SITE,
'qr_code': self.qrcode
}
text_msg = render_to_string(
'provisioning/mail/text/activation_airwatch.tmpl.txt',
template_parameters
)
html_msg = render_to_string(
'provisioning/mail/html/activation_airwatch.tmpl.html',
template_parameters
)
send_mail(
title,
text_msg,
settings.DEFAULT_FROM_EMAIL,
[user.email],
html_message=html_msg
)
except airwatch.user.UserAlreadyActivatedError:
pass
else:
super(AirWatch, self).activate(user, editor)
def deactivate(self, user, editor=None):
super(AirWatch, self).deactivate(user, editor)
self.get_service_user(user).delete()
def __group_and_aw_user(self, software, user):
metadata, _ = self.tenantserviceasset_set.get_or_create(asset=software)
group = self.get_usergroup(metadata.get('group_name'))
service_user = self.get_service_user(user)
return group, service_user
def assign(self, software, user):
if self.type not in software.supported_platforms:
return
log.debug('Assigning %s to %s on Airwatch' % (software, user))
group, aw_user = self.__group_and_aw_user(software, user)
try:
group.add_member(aw_user)
except airwatch.user.UserAlreadyEnrolledError:
pass
def unassign(self, software, user):
if self.type not in software.supported_platforms:
return
log.debug('Removing %s from %s on Airwatch' % (software, user))
group, aw_user = self.__group_and_aw_user(software, user)
try:
group.remove_member(aw_user)
except airwatch.user.UserNotEnrolledError:
pass
def get_all_devices(self):
endpoint = 'mdm/devices/search'
response = self.get_client().call_api(
'GET', endpoint)
response.raise_for_status()
if response.status_code == 200:
devices = [{'model': d['Model'], 'username': d['UserName'],
'serial_number': d[
'SerialNumber']} for d in
response.json().get('Devices')]
return devices
def get_available_devices(self):
return [d for d in self.get_all_devices()
if d['username'] == '' or d['username'] == 'staging']
@classmethod
def get_serializer_data(cls, **data):
return {
'username': data.get('username'),
'password': data.get('password'),
'server_url': data.get('server_url'),
'group_id': data.get('group_id')
}
class Meta:
verbose_name = 'AirWatch'
class MobileIron(TenantService, Provisionable):
PLATFORM_TYPE = 'mobile'
class TenantServiceAsset(PropertyTable):
service = models.ForeignKey(TenantService)
asset = models.ForeignKey(Asset)
@property
def tenant(self):
return self.service.tenant
@property
def platform(self):
return self.service.type
def __unicode__(self):
return 'Asset %s on %s' % (self.asset, self.service)
class Meta:
unique_together = ('service', 'asset')
class LastSeenEvent(TimeStampedModel):
user = models.ForeignKey(User)
item_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
item = GenericForeignKey('item_type', 'object_id')
last_seen = models.DateTimeField()
item_provisioned.connect(UserProvisionHistory.on_provision,
dispatch_uid='provision')
item_deprovisioned.connect(UserProvisionHistory.on_deprovision,
dispatch_uid='deprovision')
if not getattr(settings, 'PROVISIONABLE_SERVICES'):
settings.PROVISIONABLE_SERVICES = [
'.'.join([__name__, k.__name__]) for k in [Okta, AirWatch, MobileIron]
]
if not getattr(settings, 'ASSET_CLASSES', []):
settings.ASSET_CLASSES = [
'.'.join([__name__, k.__name__]) for k in
[Software, Device, MobileDataPlan]
]
| apache-2.0 | 4,245,768,454,504,376,300 | 32.186161 | 90 | 0.610653 | false | 3.993656 | false | false | false |
bender-bot/bender | bender/_tests/test_main.py | 1 | 2381 | from io import StringIO
import pkg_resources
import pytest
import threading
import bender._main
from bender.backbones.console import BenderConsole
from bender.decorators import backbone_start
from bender.testing import VolatileBrain, DumbMessage
@pytest.mark.timeout(3.0)
def test_main(mock):
stdout = StringIO()
stdin = StringIO()
stdin.write(u'hey\nquit\n')
stdin.seek(0)
timer = threading.Timer(1.0, stdin.close)
timer.start()
console = BenderConsole(stdout=stdout, stdin=stdin)
mock.patch.object(bender._main, 'get_console', return_value=console)
mock.patch.object(bender._main, 'get_brain', return_value=VolatileBrain())
assert bender._main.main([]) == 0
assert 'Hey, my name is Bender' in stdout.getvalue()
@pytest.mark.timeout(3.0)
def test_backbone_selection(mock):
"""
Test that we can select backbones from the command line.
"""
quitter = install_quitter_backbone(mock)
mock.patch.object(bender._main, 'get_brain', return_value=VolatileBrain())
assert bender._main.main(['', '--backbone', 'quitter']) == 0
assert quitter.started
def install_quitter_backbone(mock):
"""
installs a "quitter" backbone: a backbone that immediately quits right
after starting.
It is installed as a distutils entry point by mocking the relevant
methods, as close to distutils as possible to ensure all our code is
tested.
This can be moved into a fixture, or even make QuitterBackbone
available in bender.testing.
"""
class QuitterBackbone(object):
def __init__(self):
self.on_message_received = None
self.started = False
@backbone_start
def start(self):
self.on_message_received(DumbMessage('quit', 'user'))
self.started = True
quitter = QuitterBackbone()
factory = lambda: quitter
class EntryPoint(object):
pass
quitter_entry_point = EntryPoint()
quitter_entry_point.name = 'quitter'
quitter_entry_point.load = lambda: factory
original_entry_points = pkg_resources.iter_entry_points
def iter_entry_points(name):
if name == 'bender_backbone':
return [quitter_entry_point]
else:
return original_entry_points(name)
mock.patch.object(pkg_resources, 'iter_entry_points', iter_entry_points)
return quitter
| lgpl-3.0 | -8,346,947,514,074,849,000 | 28.395062 | 78 | 0.677866 | false | 3.640673 | true | false | false |
gaccardo/buxfer_api | api/reporter.py | 1 | 16970 | import os
import math
from reportlab.pdfgen import canvas
from reportlab.graphics.shapes import Drawing
from reportlab.graphics.charts.piecharts import Pie
from reportlab.graphics.charts.legends import Legend
from reportlab.lib.colors import black, red, purple, green, \
maroon, brown, pink, white, HexColor
from reportlab.graphics import renderPDF
from reportlab.platypus import Table, TableStyle
from reportlab.lib.units import cm
from reportlab.platypus import PageBreak
import datetime
import settings
from currency_calculator import CurrencyCalculator
class Reporter( object ):
def __init__(self, data):
self.accounts = data['accounts']
self.transactions = data['transactions']
self.budgets = data['budgets']
self.reminders = data['reminders']
self.c = None
self.l = 800
cc = CurrencyCalculator()
self.dolar = None
try:
self.dolar = cc.get_dolar()['real']
except:
self.dolar = settings.DOLAR
self.pdf_chart_colors = [
HexColor("#0000e5"),
HexColor("#1f1feb"),
HexColor("#5757f0"),
HexColor("#8f8ff5"),
HexColor("#c7c7fa"),
HexColor("#f5c2c2"),
HexColor("#eb8585"),
HexColor("#e04747"),
HexColor("#d60a0a"),
HexColor("#cc0000"),
HexColor("#ff0000"),
]
def __prepare_document(self):
file_path = os.path.join(settings.REPORT_TMP,
settings.REPORT_NAME)
self.c = canvas.Canvas(file_path)
def __generate_header(self):
self.c.setFont('Helvetica', 28)
self.c.drawString(30, self.l, 'Estado general de cuentas')
self.c.setFont('Courier', 11)
hoy = datetime.datetime.now()
hoy = hoy.strftime('%d/%m/%Y')
self.c.drawString(30, 780, 'Fecha: %s' % hoy)
self.c.drawString(495, 780, 'Dolar: $%.2f' % self.dolar)
self.c.line(20,775,580,775)
def __get_totals_by_currency(self):
totals = dict()
for acc in self.accounts:
if acc.currency not in totals:
totals[acc.currency] = acc.balance
else:
totals[acc.currency] += acc.balance
return totals
def __accounts_amount(self):
self.c.setFont('Courier', 14)
self.c.drawString(30, 750, 'Cuentas')
data = [['Cuenta', 'Moneda', 'Saldo']]
self.l = 630
for acc in self.accounts:
data.append([acc.name, acc.currency,
'$%.2f' % acc.balance])
t = Table(data)
t.setStyle(TableStyle([('INNERGRID', (0,0), (-1,-1), 0.25, black),
('BOX', (0,0), (-1,-1), 0.25, black),
('FONTNAME', (0,0), (-1,0), 'Courier-Bold'),
('BACKGROUND', (0,0), (-1,0), HexColor('#efeded')),
('BACKGROUND', (0,0), (0,-1), HexColor('#efeded')),
('FONTSIZE', (0,0), (-1,0), 12),
('FONTSIZE', (0,1), (-1,-1), 8),
('FONTNAME', (0,1), (-1,-1), 'Courier')]))
t.wrapOn(self.c, 30, self.l)
t.drawOn(self.c, 30, self.l)
self.l -= 20
self.c.setFont('Courier', 14)
self.c.drawString(30, self.l, 'Totales por moneda')
self.l -= 63
data2 = [['Moneda', 'Saldo']]
totals = self.__get_totals_by_currency()
for currency, amount in totals.iteritems():
data2.append([currency, amount])
t2 = Table(data2)
t2.setStyle(TableStyle([('INNERGRID', (0,0), (-1,-1), 0.25, black),
('BOX', (0,0), (-1,-1), 0.25, black),
('FONTNAME', (0,0), (-1,0), 'Courier-Bold'),
('BACKGROUND', (0,0), (-1,0), HexColor('#efeded')),
('BACKGROUND', (0,0), (0,-1), HexColor('#efeded')),
('FONTSIZE', (0,0), (-1,0), 12),
('FONTSIZE', (0,1), (-1,-1), 8),
('FONTNAME', (0,1), (-1,-1), 'Courier')]))
t2.wrapOn(self.c, 30, self.l)
t2.drawOn(self.c, 30, self.l)
def __translate_type(self, tipo):
types = dict()
types['income'] = 'ingreso'
types['expense'] = 'gasto'
types['transfer'] = 'tranferencia'
return types[tipo]
def __transactions(self):
self.l -= 20
self.c.setFont('Courier', 14)
self.c.drawString(30, self.l, 'Movimientos')
header = ['Fecha', 'Tipo', 'Cuenta', 'Monto', 'Description']
data = [header]
for tra in self.transactions:
tipo = self.__translate_type(tra.t_type)
data.append([tra.date, tipo.upper(), tra.account,
'$%.2f' % tra.amount, tra.description])
registros = 24
filas = len(data) / float(registros)
coheficiente = math.ceil(len(data) / filas)
look = 0
datas = list()
datas_new = list()
while look < len(data):
second = int(look+coheficiente)
datas.append(data[look:second])
look = int(look+coheficiente)
datas_new.append(datas[0])
for dd in datas[1:][::-1]:
datas_new.append([header] + dd)
data1 = datas_new[0]
self.l -= len(data1) * 19
t = Table(data1)
t.setStyle(TableStyle([('INNERGRID', (0,0), (-1,-1), 0.25, black),
('BOX', (0,0), (-1,-1), 0.25, black),
('FONTNAME', (0,0), (-1,0), 'Courier-Bold'),
('BACKGROUND', (0,0), (-1,0), HexColor('#efeded')),
('BACKGROUND', (0,0), (0,-1), HexColor('#efeded')),
('FONTSIZE', (0,0), (-1,0), 12),
('FONTSIZE', (0,1), (-1,-1), 8),
('FONTNAME', (0,1), (-1,-1), 'Courier')]))
t.wrapOn(self.c, 30, self.l)
t.drawOn(self.c, 30, self.l)
for dd in datas_new[1:][::-1]:
p = PageBreak()
p.drawOn(self.c, 0, 1000)
self.c.showPage()
self.l = 800 - (len(dd) * 19)
t2 = Table(dd)
t2.setStyle(TableStyle([('INNERGRID', (0,0), (-1,-1), 0.25, black),
('BOX', (0,0), (-1,-1), 0.25, black),
('FONTNAME', (0,0), (-1,0), 'Courier-Bold'),
('BACKGROUND', (0,0), (-1,0), HexColor('#efeded')),
('BACKGROUND', (0,0), (0,-1), HexColor('#efeded')),
('FONTSIZE', (0,0), (-1,0), 12),
('FONTSIZE', (0,1), (-1,-1), 8),
('FONTNAME', (0,1), (-1,-1), 'Courier')]))
t2.wrapOn(self.c, 30, self.l)
t2.drawOn(self.c, 30, self.l)
def __add_graph(self):
drawing = Drawing(200, 100)
data = list()
labels = list()
self.c.drawString(370, 730,
'Distribucion en pesos'.encode('utf-8'))
for acc in self.accounts:
balance = acc.balance
if acc.currency == 'USD':
balance = balance * self.dolar
data.append(balance)
labels.append(acc.name)
pie = Pie()
pie.x = 280
pie.y = 630
pie.height = 100
pie.width = 100
pie.data = data
pie.labels = labels
pie.simpleLabels = 1
pie.slices.strokeWidth = 1
pie.slices.strokeColor = black
pie.slices.label_visible = 0
legend = Legend()
legend.x = 400
legend.y = 680
legend.dx = 8
legend.dy = 8
legend.fontName = 'Helvetica'
legend.fontSize = 7
legend.boxAnchor = 'w'
legend.columnMaximum = 10
legend.strokeWidth = 1
legend.strokeColor = black
legend.deltax = 75
legend.deltay = 10
legend.autoXPadding = 5
legend.yGap = 0
legend.dxTextSpace = 5
legend.alignment = 'right'
legend.dividerLines = 1|2|4
legend.dividerOffsY = 4.5
legend.subCols.rpad = 30
n = len(pie.data)
self.__setItems(n,pie.slices,
'fillColor',self.pdf_chart_colors)
legend.colorNamePairs = [(pie.slices[i].fillColor,
(pie.labels[i][0:20],'$%0.2f' % pie.data[i])) for i in xrange(n)]
drawing.add(pie)
drawing.add(legend)
x, y = 0, 0
renderPDF.draw(drawing, self.c, x, y, showBoundary=False)
def __per_account_statistic(self):
for acc in self.accounts:
p = PageBreak()
p.drawOn(self.c, 0, 1000)
self.c.showPage()
self.l = 760
self.c.setFont('Courier', 14)
self.c.drawString(30, 800, 'Cuenta: %s' % \
acc.name)
header = ['Fecha', 'Tipo', 'Monto', 'Description']
data = [header]
g_data = list()
g_labe = list()
total = 0
for tra in self.transactions:
if tra.account == acc.name:
if tra.t_type in ['expense', 'transfer']:
tipo = self.__translate_type(tra.t_type)
data.append([tra.date, tipo.upper(),
'$%2.f' % tra.amount, tra.description])
total += tra.amount
g_data.append(tra.amount)
g_labe.append(tra.description.encode('utf-8'))
data.append(['TOTAL', '', '$%.2f' % total, ''])
if len(g_data) == 0 or len(g_labe) == 0:
self.c.setFont('Courier', 12)
self.c.drawString(30, 770, 'Sin movimientos negativos')
continue
from_title = 35
if len(data) != 2:
self.l -= ((len(data) * len(data)) + len(data)) + from_title
t = Table(data)
t.setStyle(TableStyle([('INNERGRID', (0,0), (-1,-1), 0.25, black),
('BOX', (0,0), (-1,-1), 0.25, black),
('FONTNAME', (0,0), (-1,0), 'Courier-Bold'),
('BACKGROUND', (0,0), (-1,0), HexColor('#efeded')),
('BACKGROUND', (0,0), (0,-1), HexColor('#efeded')),
('FONTSIZE', (0,0), (-1,0), 12),
('FONTSIZE', (0,1), (-1,-1), 8),
('FONTNAME', (0,1), (-1,-1), 'Courier'),
('BACKGROUND', (0,-1), (-1,-1), red),
('TEXTCOLOR', (0,-1), (-1,-1), white)]))
t.wrapOn(self.c, 30, self.l)
t.drawOn(self.c, 30, self.l)
drawing = Drawing(200, 100)
pie = Pie()
pie.x = 30
pie.y = self.l - 300
pie.height = 200
pie.width = 200
pie.data = g_data
pie.labels = g_labe
pie.simpleLabels = 1
pie.slices.strokeWidth = 1
pie.slices.strokeColor = black
pie.slices.label_visible = 0
pie.slices.popout = 1
#pie.labels = map(str, pie.data)
legend = Legend()
legend.x = 250
legend.y = self.l - 250
legend.dx = 8
legend.dy = 8
legend.fontName = 'Helvetica'
legend.fontSize = 7
legend.boxAnchor = 'w'
legend.columnMaximum = 10
legend.strokeWidth = 1
legend.strokeColor = black
legend.deltax = 75
legend.deltay = 10
legend.autoXPadding = 5
legend.yGap = 0
legend.dxTextSpace = 5
legend.alignment = 'right'
legend.dividerLines = 1|2|4
legend.dividerOffsY = 4.5
legend.subCols.rpad = 30
n = len(pie.data)
self.__setItems(n,pie.slices,
'fillColor',self.pdf_chart_colors)
legend.colorNamePairs = [(pie.slices[i].fillColor,
(pie.labels[i][0:20],'$%0.2f' % pie.data[i])) for i in xrange(n)]
drawing.add(pie)
drawing.add(legend)
x, y = 0, 10
renderPDF.draw(drawing, self.c, x, y, showBoundary=False)
def __setItems(self, n, obj, attr, values):
m = len(values)
i = m // n
for j in xrange(n):
setattr(obj[j],attr,values[j*i % m])
def __get_tags_statistics(self):
monto_categorias = dict()
for tra in self.transactions:
if len(tra.tags) > 0:
for tag in tra.tags:
if tag in monto_categorias.keys():
monto_categorias[tag] += tra.amount
else:
monto_categorias[tag] = tra.amount
labels = [lab.encode('utf-8') for lab in monto_categorias.keys()]
data = monto_categorias.values()
p = PageBreak()
p.drawOn(self.c, 0, 1000)
self.c.showPage()
self.l = 600
self.c.setFont('Courier', 14)
self.c.drawString(30, 800, 'Categorias')
drawing = Drawing(200, 200)
pie = Pie()
pie.x = 30
pie.y = self.l - 130
pie.height = 300
pie.width = 300
pie.data = data
pie.labels = labels
pie.simpleLabels = 1
pie.slices.strokeWidth = 1
pie.slices.strokeColor = black
pie.slices.label_visible = 0
legend = Legend()
legend.x = 400
legend.y = self.l
legend.dx = 8
legend.dy = 8
legend.fontName = 'Helvetica'
legend.fontSize = 7
legend.boxAnchor = 'w'
legend.columnMaximum = 10
legend.strokeWidth = 1
legend.strokeColor = black
legend.deltax = 75
legend.deltay = 10
legend.autoXPadding = 5
legend.yGap = 0
legend.dxTextSpace = 5
legend.alignment = 'right'
legend.dividerLines = 1|2|4
legend.dividerOffsY = 4.5
legend.subCols.rpad = 30
n = len(pie.data)
self.__setItems(n,pie.slices,
'fillColor',self.pdf_chart_colors)
legend.colorNamePairs = [(pie.slices[i].fillColor,
(pie.labels[i][0:20],'$%0.2f' % pie.data[i])) for i in xrange(n)]
drawing.add(pie)
drawing.add(legend)
x, y = 0, 10
renderPDF.draw(drawing, self.c, x, y, showBoundary=False)
def __budgets_spent(self):
self.l = 800
p = PageBreak()
p.drawOn(self.c, 0, 1000)
self.c.showPage()
self.c.setFont('Courier', 14)
self.c.drawString(30, self.l, 'Budgets')
header = ['Nombre', 'Gastado', 'Balance', 'Limite']
data = [header]
for bud in self.budgets:
data.append([bud.name, bud.spent,
bud.balance, bud.limit])
self.l -= len(data) * 19
t = Table(data)
t.setStyle(TableStyle([('INNERGRID', (0,0), (-1,-1), 0.25, black),
('BOX', (0,0), (-1,-1), 0.25, black),
('FONTNAME', (0,0), (-1,0), 'Courier-Bold'),
('BACKGROUND', (0,0), (-1,0), HexColor('#efeded')),
('BACKGROUND', (0,0), (0,-1), HexColor('#efeded')),
('FONTSIZE', (0,0), (-1,0), 12),
('FONTSIZE', (0,1), (-1,-1), 8),
('FONTNAME', (0,1), (-1,-1), 'Courier')]))
t.wrapOn(self.c, 30, self.l)
t.drawOn(self.c, 30, self.l)
def __reminders(self):
self.l = 800
p = PageBreak()
p.drawOn(self.c, 0, 1000)
self.c.showPage()
self.c.setFont('Courier', 14)
self.c.drawString(30, self.l, 'Recordatorio de pagos')
header = ['Fecha', 'Descripcion', 'Monto']
data = [header]
for rem in self.reminders:
data.append([rem.start_date, rem.description,
rem.amount])
self.l -= len(data) * 19
t = Table(data)
t.setStyle(TableStyle([('INNERGRID', (0,0), (-1,-1), 0.25, black),
('BOX', (0,0), (-1,-1), 0.25, black),
('FONTNAME', (0,0), (-1,0), 'Courier-Bold'),
('BACKGROUND', (0,0), (-1,0), HexColor('#efeded')),
('BACKGROUND', (0,0), (0,-1), HexColor('#efeded')),
('FONTSIZE', (0,0), (-1,0), 12),
('FONTSIZE', (0,1), (-1,-1), 8),
('FONTNAME', (0,1), (-1,-1), 'Courier')]))
t.wrapOn(self.c, 30, self.l)
t.drawOn(self.c, 30, self.l)
def generate_report(self):
self.__prepare_document()
self.__generate_header()
self.__accounts_amount()
self.__add_graph()
self.__transactions()
self.__get_tags_statistics()
self.__per_account_statistic()
self.__budgets_spent()
self.__reminders()
self.c.showPage()
self.c.save()
| gpl-2.0 | -7,674,272,941,585,014,000 | 32.537549 | 81 | 0.478197 | false | 3.387902 | false | false | false |
stephanie-wang/ray | python/ray/tune/suggest/variant_generator.py | 1 | 8452 | import copy
import logging
import numpy
import random
from ray.tune import TuneError
from ray.tune.sample import sample_from
logger = logging.getLogger(__name__)
def generate_variants(unresolved_spec):
"""Generates variants from a spec (dict) with unresolved values.
There are two types of unresolved values:
Grid search: These define a grid search over values. For example, the
following grid search values in a spec will produce six distinct
variants in combination:
"activation": grid_search(["relu", "tanh"])
"learning_rate": grid_search([1e-3, 1e-4, 1e-5])
Lambda functions: These are evaluated to produce a concrete value, and
can express dependencies or conditional distributions between values.
They can also be used to express random search (e.g., by calling
into the `random` or `np` module).
"cpu": lambda spec: spec.config.num_workers
"batch_size": lambda spec: random.uniform(1, 1000)
Finally, to support defining specs in plain JSON / YAML, grid search
and lambda functions can also be defined alternatively as follows:
"activation": {"grid_search": ["relu", "tanh"]}
"cpu": {"eval": "spec.config.num_workers"}
Use `format_vars` to format the returned dict of hyperparameters.
Yields:
(Dict of resolved variables, Spec object)
"""
for resolved_vars, spec in _generate_variants(unresolved_spec):
assert not _unresolved_values(spec)
yield resolved_vars, spec
def grid_search(values):
"""Convenience method for specifying grid search over a value.
Arguments:
values: An iterable whose parameters will be gridded.
"""
return {"grid_search": values}
_STANDARD_IMPORTS = {
"random": random,
"np": numpy,
}
_MAX_RESOLUTION_PASSES = 20
def resolve_nested_dict(nested_dict):
"""Flattens a nested dict by joining keys into tuple of paths.
Can then be passed into `format_vars`.
"""
res = {}
for k, v in nested_dict.items():
if isinstance(v, dict):
for k_, v_ in resolve_nested_dict(v).items():
res[(k, ) + k_] = v_
else:
res[(k, )] = v
return res
def format_vars(resolved_vars):
"""Formats the resolved variable dict into a single string."""
out = []
for path, value in sorted(resolved_vars.items()):
if path[0] in ["run", "env", "resources_per_trial"]:
continue # TrialRunner already has these in the experiment_tag
pieces = []
last_string = True
for k in path[::-1]:
if isinstance(k, int):
pieces.append(str(k))
elif last_string:
last_string = False
pieces.append(k)
pieces.reverse()
out.append(_clean_value("_".join(pieces)) + "=" + _clean_value(value))
return ",".join(out)
def flatten_resolved_vars(resolved_vars):
"""Formats the resolved variable dict into a mapping of (str -> value)."""
flattened_resolved_vars_dict = {}
for pieces, value in resolved_vars.items():
if pieces[0] == "config":
pieces = pieces[1:]
pieces = [str(piece) for piece in pieces]
flattened_resolved_vars_dict["/".join(pieces)] = value
return flattened_resolved_vars_dict
def _clean_value(value):
if isinstance(value, float):
return "{:.5}".format(value)
else:
return str(value).replace("/", "_")
def _generate_variants(spec):
spec = copy.deepcopy(spec)
unresolved = _unresolved_values(spec)
if not unresolved:
yield {}, spec
return
grid_vars = []
lambda_vars = []
for path, value in unresolved.items():
if callable(value):
lambda_vars.append((path, value))
else:
grid_vars.append((path, value))
grid_vars.sort()
grid_search = _grid_search_generator(spec, grid_vars)
for resolved_spec in grid_search:
resolved_vars = _resolve_lambda_vars(resolved_spec, lambda_vars)
for resolved, spec in _generate_variants(resolved_spec):
for path, value in grid_vars:
resolved_vars[path] = _get_value(spec, path)
for k, v in resolved.items():
if (k in resolved_vars and v != resolved_vars[k]
and _is_resolved(resolved_vars[k])):
raise ValueError(
"The variable `{}` could not be unambiguously "
"resolved to a single value. Consider simplifying "
"your configuration.".format(k))
resolved_vars[k] = v
yield resolved_vars, spec
def _assign_value(spec, path, value):
for k in path[:-1]:
spec = spec[k]
spec[path[-1]] = value
def _get_value(spec, path):
for k in path:
spec = spec[k]
return spec
def _resolve_lambda_vars(spec, lambda_vars):
resolved = {}
error = True
num_passes = 0
while error and num_passes < _MAX_RESOLUTION_PASSES:
num_passes += 1
error = False
for path, fn in lambda_vars:
try:
value = fn(_UnresolvedAccessGuard(spec))
except RecursiveDependencyError as e:
error = e
except Exception:
raise ValueError(
"Failed to evaluate expression: {}: {}".format(path, fn))
else:
_assign_value(spec, path, value)
resolved[path] = value
if error:
raise error
return resolved
def _grid_search_generator(unresolved_spec, grid_vars):
value_indices = [0] * len(grid_vars)
def increment(i):
value_indices[i] += 1
if value_indices[i] >= len(grid_vars[i][1]):
value_indices[i] = 0
if i + 1 < len(value_indices):
return increment(i + 1)
else:
return True
return False
if not grid_vars:
yield unresolved_spec
return
while value_indices[-1] < len(grid_vars[-1][1]):
spec = copy.deepcopy(unresolved_spec)
for i, (path, values) in enumerate(grid_vars):
_assign_value(spec, path, values[value_indices[i]])
yield spec
if grid_vars:
done = increment(0)
if done:
break
def _is_resolved(v):
resolved, _ = _try_resolve(v)
return resolved
def _try_resolve(v):
if isinstance(v, sample_from):
# Function to sample from
return False, v.func
elif isinstance(v, dict) and len(v) == 1 and "eval" in v:
# Lambda function in eval syntax
return False, lambda spec: eval(
v["eval"], _STANDARD_IMPORTS, {"spec": spec})
elif isinstance(v, dict) and len(v) == 1 and "grid_search" in v:
# Grid search values
grid_values = v["grid_search"]
if not isinstance(grid_values, list):
raise TuneError(
"Grid search expected list of values, got: {}".format(
grid_values))
return False, grid_values
return True, v
def _unresolved_values(spec):
found = {}
for k, v in spec.items():
resolved, v = _try_resolve(v)
if not resolved:
found[(k, )] = v
elif isinstance(v, dict):
# Recurse into a dict
for (path, value) in _unresolved_values(v).items():
found[(k, ) + path] = value
elif isinstance(v, list):
# Recurse into a list
for i, elem in enumerate(v):
for (path, value) in _unresolved_values({i: elem}).items():
found[(k, ) + path] = value
return found
class _UnresolvedAccessGuard(dict):
def __init__(self, *args, **kwds):
super(_UnresolvedAccessGuard, self).__init__(*args, **kwds)
self.__dict__ = self
def __getattribute__(self, item):
value = dict.__getattribute__(self, item)
if not _is_resolved(value):
raise RecursiveDependencyError(
"`{}` recursively depends on {}".format(item, value))
elif isinstance(value, dict):
return _UnresolvedAccessGuard(value)
else:
return value
class RecursiveDependencyError(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
| apache-2.0 | 5,152,314,125,364,362,000 | 29.846715 | 78 | 0.57442 | false | 4.045955 | false | false | false |
rwl/PyCIM | CIM14/IEC61968/AssetModels/ToWindingSpec.py | 1 | 6782 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Core.IdentifiedObject import IdentifiedObject
class ToWindingSpec(IdentifiedObject):
"""For short-circuit tests, specifies the winding and tap for all short-circuited windings. For open-circuit tests, specifies the winding, tap, induced voltage, and induced angle for any non-excited windings that were measured during the test. This won't apply if only the exciting current and no-load losses were measured.
"""
def __init__(self, voltage=0.0, phaseShift=0.0, toTapStep=0, ToWinding=None, OpenCircuitTests=None, ShortCircuitTests=None, *args, **kw_args):
"""Initialises a new 'ToWindingSpec' instance.
@param voltage: (if open-circuit test) Voltage measured at the open-circuited 'to' winding, with the 'from' winding set to the 'from' winding's rated voltage and all other windings open-circuited.
@param phaseShift: (if open-circuit test) Phase shift measured at the open-circuited 'to' winding, with the 'from' winding set to the 'from' winding's rated voltage and all other windings open-circuited.
@param toTapStep: Tap step number for the 'to' winding of the test pair.
@param ToWinding: Winding short-circuited in a short-circuit test, or measured for induced voltage and angle in an open-circuit test.
@param OpenCircuitTests: All open-circuit tests in which this winding was measured.
@param ShortCircuitTests: All short-circuit tests in which this winding was short-circuited.
"""
#: (if open-circuit test) Voltage measured at the open-circuited 'to' winding, with the 'from' winding set to the 'from' winding's rated voltage and all other windings open-circuited.
self.voltage = voltage
#: (if open-circuit test) Phase shift measured at the open-circuited 'to' winding, with the 'from' winding set to the 'from' winding's rated voltage and all other windings open-circuited.
self.phaseShift = phaseShift
#: Tap step number for the 'to' winding of the test pair.
self.toTapStep = toTapStep
self._ToWinding = None
self.ToWinding = ToWinding
self._OpenCircuitTests = []
self.OpenCircuitTests = [] if OpenCircuitTests is None else OpenCircuitTests
self._ShortCircuitTests = []
self.ShortCircuitTests = [] if ShortCircuitTests is None else ShortCircuitTests
super(ToWindingSpec, self).__init__(*args, **kw_args)
_attrs = ["voltage", "phaseShift", "toTapStep"]
_attr_types = {"voltage": float, "phaseShift": float, "toTapStep": int}
_defaults = {"voltage": 0.0, "phaseShift": 0.0, "toTapStep": 0}
_enums = {}
_refs = ["ToWinding", "OpenCircuitTests", "ShortCircuitTests"]
_many_refs = ["OpenCircuitTests", "ShortCircuitTests"]
def getToWinding(self):
"""Winding short-circuited in a short-circuit test, or measured for induced voltage and angle in an open-circuit test.
"""
return self._ToWinding
def setToWinding(self, value):
if self._ToWinding is not None:
filtered = [x for x in self.ToWinding.ToWindingSpecs if x != self]
self._ToWinding._ToWindingSpecs = filtered
self._ToWinding = value
if self._ToWinding is not None:
if self not in self._ToWinding._ToWindingSpecs:
self._ToWinding._ToWindingSpecs.append(self)
ToWinding = property(getToWinding, setToWinding)
def getOpenCircuitTests(self):
"""All open-circuit tests in which this winding was measured.
"""
return self._OpenCircuitTests
def setOpenCircuitTests(self, value):
for p in self._OpenCircuitTests:
filtered = [q for q in p.MeasuredWindingSpecs if q != self]
self._OpenCircuitTests._MeasuredWindingSpecs = filtered
for r in value:
if self not in r._MeasuredWindingSpecs:
r._MeasuredWindingSpecs.append(self)
self._OpenCircuitTests = value
OpenCircuitTests = property(getOpenCircuitTests, setOpenCircuitTests)
def addOpenCircuitTests(self, *OpenCircuitTests):
for obj in OpenCircuitTests:
if self not in obj._MeasuredWindingSpecs:
obj._MeasuredWindingSpecs.append(self)
self._OpenCircuitTests.append(obj)
def removeOpenCircuitTests(self, *OpenCircuitTests):
for obj in OpenCircuitTests:
if self in obj._MeasuredWindingSpecs:
obj._MeasuredWindingSpecs.remove(self)
self._OpenCircuitTests.remove(obj)
def getShortCircuitTests(self):
"""All short-circuit tests in which this winding was short-circuited.
"""
return self._ShortCircuitTests
def setShortCircuitTests(self, value):
for p in self._ShortCircuitTests:
filtered = [q for q in p.ShortedWindingSpecs if q != self]
self._ShortCircuitTests._ShortedWindingSpecs = filtered
for r in value:
if self not in r._ShortedWindingSpecs:
r._ShortedWindingSpecs.append(self)
self._ShortCircuitTests = value
ShortCircuitTests = property(getShortCircuitTests, setShortCircuitTests)
def addShortCircuitTests(self, *ShortCircuitTests):
for obj in ShortCircuitTests:
if self not in obj._ShortedWindingSpecs:
obj._ShortedWindingSpecs.append(self)
self._ShortCircuitTests.append(obj)
def removeShortCircuitTests(self, *ShortCircuitTests):
for obj in ShortCircuitTests:
if self in obj._ShortedWindingSpecs:
obj._ShortedWindingSpecs.remove(self)
self._ShortCircuitTests.remove(obj)
| mit | 7,964,606,802,234,008,000 | 48.867647 | 328 | 0.696697 | false | 3.769872 | true | false | false |
walterbender/infoslicer | infoslicer/widgets/Gallery_View.py | 1 | 8660 | # Copyright (C) IBM Corporation 2008
import gi
# This module uses GTK3-only API (e.g. Gtk.HBox), so pin the versions
# before the first gi.repository import.
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
from gi.repository import GdkPixbuf
import os
import cPickle
import logging
from Editable_Textbox import Editable_Textbox
from infoslicer.processing.Article_Data import *
from infoslicer.processing.Article import Article
import book
logger = logging.getLogger('infoslicer')
class Gallery_View( Gtk.HBox ):
"""
Created by Christopher Leonard
Drag-and-drop methods added by Jonathan Mace
    The gallery view acts in the same way as the Reading_View,
    except instead of displaying the text of an article, it displays
    the images associated with that article, one at a time, with
    previous/next buttons for stepping through them.
Drag-and-drop methods have been added to set up the images as a
drag source. The data returned by drag-data-get will be a list
containing an Image_Data object and a Sentence_Data object. These
correspond to the image and caption respectively.
"""
def __init__(self):
self.image_list = []
GObject.GObject.__init__(self)
self.set_size_request(int(Gdk.Screen.width() / 2), -1)
self.current_index = -1
left_button = Gtk.Button(label="\n\n << \n\n")
right_button = Gtk.Button(label="\n\n >> \n\n")
self.imagenumberlabel = Gtk.Label()
self.image = Gtk.Image()
self.imagebox = Gtk.EventBox()
self.imagebox.add(self.image)
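        # The event box (rather than the Gtk.Image itself) is armed as the
        # drag source: left-button drags offer the stock image targets, and
        # the handlers connected below supply the drag icon and payload.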
self.imagebox.drag_source_set(Gdk.ModifierType.BUTTON1_MASK,
[],
Gdk.DragAction.COPY)
self.imagebox.drag_source_add_image_targets()
self.imagebox.connect("drag-begin", self.drag_begin_event, None)
        logger.debug('##################### Gallery_View.connect')
self.imagebox.connect("drag-data-get", self.drag_data_get_event, None)
self.caption = Gtk.Label(label="")
self.caption.set_size_request(int(Gdk.Screen.width() / 3), -1)
self.caption.set_line_wrap(True)
self.caption.set_max_width_chars(40)
self.image_drag_container = Gtk.VBox()
self.image_drag_container.pack_start(self.imagenumberlabel, expand=False,
fill=False, padding=0)
self.image_drag_container.pack_start(self.imagebox, False, False, 0)
self.image_drag_container.pack_start(self.caption, False, False, 0)
image_container = Gtk.VBox()
image_container.pack_start(Gtk.Label(" "), True, True, 0)
image_container.pack_start(self.image_drag_container, False, True, 0)
image_container.pack_start(Gtk.Label(" "), True, True, 0)
left_button_container = Gtk.VBox()
left_button_container.pack_start(Gtk.Label(" "), True, True, 0)
left_button_container.pack_start(left_button, False, True, 0)
left_button_container.pack_start(Gtk.Label(" "), True, True, 0)
right_button_container = Gtk.VBox()
right_button_container.pack_start(Gtk.Label(" "), True, True, 0)
right_button_container.pack_start(right_button, False, True, 0)
right_button_container.pack_start(Gtk.Label(" "), True, True, 0)
self.pack_start(left_button_container, False, True, 0)
self.pack_start(image_container, True, True, 0)
self.pack_start(right_button_container, False, True, 0)
self._source_article = None
self.show_all()
right_button.connect("clicked", self.get_next_item, None)
left_button.connect("clicked", self.get_prev_item, None)
self.get_next_item(right_button, None)
self.source_article_id = 0
def get_next_item(self, button, param):
if self.image_list == []:
if self._source_article and self._source_article.article_title:
self.caption.set_text("This article does not have any images")
else:
self.caption.set_text("Please select a Wikipedia article from the menu above")
self.image.clear()
return
self.current_index += 1
if self.current_index == len(self.image_list):
self.current_index = 0
self.imagebuf = GdkPixbuf.Pixbuf.new_from_file(self.image_list[self.current_index][0])
self.image.set_from_pixbuf(self.imagebuf)
self.caption.set_text("\n" + self.image_list[self.current_index][1])
self.imagenumberlabel.set_text("(%d / %d)\n" % (self.current_index+1, len(self.image_list)))
def get_prev_item(self, button, param):
if self.image_list == []:
if self._source_article and self._source_article.article_title:
self.caption.set_text("This article does not have any images")
else:
self.caption.set_text("Please select a Wikipedia article from the menu above")
self.image.clear()
return
if self.current_index == 0:
self.current_index = len(self.image_list)
self.current_index -= 1
self.imagebuf = GdkPixbuf.Pixbuf.new_from_file(self.image_list[self.current_index][0])
self.image.set_from_pixbuf(self.imagebuf)
self.caption.set_text("\n" + self.image_list[self.current_index][1])
self.imagenumberlabel.set_text("(%d / %d)\n" % (self.current_index+1, len(self.image_list)))
def get_first_item(self):
if self.image_list == []:
if self._source_article and self._source_article.article_title:
self.caption.set_text("This article does not have any images")
else:
self.caption.set_text("Please select a Wikipedia article from the menu above")
self.image.clear()
return
self.current_index = 0
self.imagebuf = GdkPixbuf.Pixbuf.new_from_file(self.image_list[self.current_index][0])
self.image.set_from_pixbuf(self.imagebuf)
self.caption.set_text("\n" + self.image_list[self.current_index][1])
logger.debug("setting text to:")
logger.debug("(%d / %d)\n" %
(self.current_index+1, len(self.image_list)))
self.imagenumberlabel.set_text("(%d / %d)\n" % (self.current_index+1, len(self.image_list)))
def set_image_list(self, image_list):
logger.debug("validagting image list")
self.image_list = _validate_image_list(book.wiki.root, image_list)
logger.debug(self.image_list)
def drag_begin_event(self, widget, context, data):
logging.debug('########### Gallery_View.drag_begin_event called')
self.imagebox.drag_source_set_icon_pixbuf(self.imagebuf)
def drag_data_get_event(self, widget, context, selection_data, info, timestamp, data):
logger.debug('############# Gallery_View.drag_data_get_event')
atom = Gdk.atom_intern("section", only_if_exists=False)
imagedata = Picture_Data(self.source_article_id,
self.image_list[self.current_index][0],
self.image_list[self.current_index][2])
captiondata = Sentence_Data(0, self.source_article_id, 0, 0, 0, self.image_list[self.current_index][1])
paragraph1data = Paragraph_Data(0, self.source_article_id, 0, 0, [imagedata])
paragraph2data = Paragraph_Data(0, self.source_article_id, 0, 0, [captiondata])
sectionsdata = [Section_Data(0, self.source_article_id, 0, [paragraph1data, paragraph2data])]
string = cPickle.dumps(sectionsdata)
selection_data.set(atom, 8, string)
def _validate_image_list(root, image_list):
"""
provides a mechanism for validating image lists and expanding relative paths
@param image_list: list of images to validate
@return: list of images with corrected paths, and broken images removed
"""
for i in xrange(len(image_list)):
if not os.access(image_list[i][0], os.F_OK):
if os.access(os.path.join(root, image_list[i][0]), os.F_OK):
image_list[i] = (os.path.join(root, image_list[i][0]),
image_list[i][1], image_list[i][2])
else:
image = None
#removing during for loop was unreliable
while None in image_list:
image_list.remove(None)
return image_list
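
# For reference, each entry validated above is a 3-tuple laid out as
# (image_path, caption_text, extra); the meaning of the third slot is an
# assumption inferred from the indexing in Gallery_View.drag_data_get_event,
# which forwards it to Picture_Data.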
| gpl-2.0 | -250,266,366,728,502,240 | 45.065217 | 111 | 0.600577 | false | 3.694539 | false | false | false |
mrchristine/dbc-notebooks | tools/pyspark_sync/workspace.py | 1 | 12134 | import base64
import argparse
import json
import requests
import sys
import os
import fnmatch
WS_LIST = "/workspace/list"
WS_STATUS = "/workspace/get-status"
WS_MKDIRS = "/workspace/mkdirs"
WS_IMPORT = "/workspace/import"
WS_EXPORT = "/workspace/export"
LS_ZONES = "/clusters/list-zones"
error_401 = """
Credentials are incorrect. Please verify the credentials passed into the APIs.
If using SSO, log out of the Databricks environment.
1. Click on the Admin login page
2. Enter your e-mail
3. Click 'Forgot my Password'
This will create a new password for you to use against the REST API. This should **not** be your SSO password
"""
class WorkspaceClient:
"""A class to define wrappers for the REST API"""
def __init__(self, host="https://myenv.cloud.databricks.com", user="admin", pwd="fakePassword", is_shared=False):
self.user = user
self.pwd = pwd
self.creds = (user, pwd)
self.host = host
self.is_shared = is_shared
self.url = host.rstrip('/') + '/api/2.0'
def get(self, endpoint, json_params={}, print_json=False):
url = self.url + endpoint
if json_params:
raw_results = requests.get(url, auth=self.creds, params=json_params)
else:
raw_results = requests.get(url, auth=self.creds)
if raw_results.status_code == 401:
print(error_401)
raise ValueError("Unauthorized error")
results = raw_results.json()
if print_json:
print(json.dumps(results, indent=4, sort_keys=True))
return results
def post(self, endpoint, json_params={}, print_json=True):
url = self.url + endpoint
if json_params:
raw_results = requests.post(url, auth=self.creds, json=json_params)
results = raw_results.json()
else:
print("Must have a payload in json_args param.")
return {}
if print_json:
print(json.dumps(results, indent=4, sort_keys=True))
# if results are empty, let's return the return status
if results:
results['http_status_code'] = raw_results.status_code
return results
else:
return {'http_status_code': raw_results.status_code}
@staticmethod
def my_map(F, items):
to_return = []
for elem in items:
to_return.append(F(elem))
return to_return
def is_file(self, path):
""" Checks if the file is a notebook or folder in Databricks"""
status = {'path': path}
resp = self.get(WS_STATUS, json_params=status)
if resp.get('error_code', None):
print(resp)
raise NameError('File does not exist in Databricks workspace.')
print("Is the path a file or folder: ")
print(resp)
if resp['object_type'] == 'DIRECTORY':
return False
return True
def get_full_path(self, in_path):
""" Get the full path of the Databricks workspace
User's can provide the relative path to push / pull from Databricks"""
path = in_path.lstrip('[\"\']').rstrip('[\"\']')
if path[0] == '/':
# return path is absolute so return here
return path
elif path[0] == '.':
full_path = '/Users/' + self.user.strip() + path[1:]
return full_path
elif str.isalnum(path[0]):
full_path = '/Users/' + self.user.strip() + '/' + path
return full_path
else:
raise ValueError('Path should start with . for relative paths or / for absolute.')
def save_single_notebook(self, fullpath):
""" Saves a single notebook from Databricks to the local directory"""
get_args = {'path': fullpath}
resp = self.get(WS_EXPORT, get_args)
# grab the relative path from the constructed full path
# this code chops of the /Users/[email protected]/ to create a local reference
save_filename = '/'.join(fullpath.split('/')[3:]) + '.' + resp['file_type']
if self.is_shared:
save_filename = self.user.split("@")[0] + '/' + save_filename
save_path = os.path.dirname(save_filename)
print("Local path to save: " + save_path)
print("Saving file in local path: " + save_filename)
# If the local path doesn't exist,we create it before we save the contents
if not os.path.exists(save_path) and save_path:
os.makedirs(save_path)
with open(save_filename, "wb") as f:
f.write(base64.b64decode(resp['content']))
def get_all_notebooks(self, fullpath):
""" Recursively list all notebooks within the folder"""
get_args = {'path': fullpath}
items = self.get(WS_LIST, get_args)['objects']
folders = list(self.my_map(lambda y: y.get('path', None),
filter(lambda x: x.get('object_type', None) == 'DIRECTORY', items)))
notebooks = list(self.my_map(lambda y: y.get('path', None),
filter(lambda x: x.get('object_type', None) == 'NOTEBOOK', items)))
print('DIRECTORIES: ' + str(folders))
print('NOTEBOOKS: ' + str(notebooks))
if folders == [] and notebooks == []:
print('Folder does not contain any notebooks')
return []
# save the notebooks with the current method
if notebooks:
self.my_map(lambda y: self.save_single_notebook(y), notebooks)
if folders:
nested_list_notebooks = list(self.my_map(lambda y: self.get_all_notebooks(y), folders))
flatten_list = [item for sublist in nested_list_notebooks for item in sublist]
return notebooks + flatten_list
return notebooks
def save_folder(self, fullpath):
""" We will save the notebooks within the paths, and exclude Library links """
list_of_notebooks = self.get_all_notebooks(fullpath)
return list_of_notebooks
# Run map of save_single_notebook across list of notebooks
def pull(self, path):
# get_args = "/Users/[email protected]/demo/reddit/Reddit SQL Analysis"
cur_path = self.get_full_path(path)
# pull the file or archive
if self.is_file(cur_path):
self.save_single_notebook(cur_path)
else:
self.save_folder(cur_path)
@staticmethod
def _parse_extension(src_path):
supported = ['scala', 'py', 'r', 'sql']
ext = src_path.split('.')[-1]
if ext == 'scala':
return {'language': 'SCALA'}
elif ext == 'py':
return {'language': 'PYTHON'}
elif ext == 'ipynb':
return {'format': 'JUPYTER'}
elif ext == 'r':
return {'language': 'R'}
elif ext == 'sql':
return {'language': 'SQL'}
elif ext == 'txt':
return {'language': 'SQL'}
else:
raise ValueError('Unsupported file format: %s. Supported formats are: ' % ext +
'[%s].' % ', '.join(supported))
def push_file(self, local_path, dst_folder = None):
"""Push a single file to DBC
This assumes the local path matches the Databricks workspace"""
# get the databricks path using the users hostname
if self.is_shared:
username = self.user.split('@')[0]
tmp_path = '/Users/' + self.user.strip() + '/' + local_path.lstrip('./').replace(username + '/', "")
elif dst_folder:
tmp_path = '/Users/' + self.user.strip() + '/' + dst_folder.replace('/', '') + '/' + local_path.lstrip('./')
else:
tmp_path = '/Users/' + self.user.strip() + '/' + local_path.lstrip('./')
overwrite = True
dirname = os.path.dirname(tmp_path)
dbc_path, file_ext = os.path.splitext(tmp_path)
        with open(local_path, 'r') as f:
            data = f.read()
create_notebook = {
"path": dbc_path,
"content": base64.b64encode(data.encode('utf-8')).decode(),
"overwrite": overwrite
}
create_notebook.update(self._parse_extension(local_path))
# create a folder, if exists then it succeeds as well
folder_resp = self.post(WS_MKDIRS, {'path': dirname}, False)
# import the notebook
resp = self.post(WS_IMPORT, create_notebook, False)
print("Push Notebook: " + dbc_path)
print(resp)
@staticmethod
def find_all_file_paths(local_dir):
matches = []
supported = ['scala', 'py', 'r', 'sql']
for root, dirnames, filenames in os.walk(local_dir):
for ext in supported:
for filename in fnmatch.filter(filenames, '*.' + ext):
matches.append(os.path.join(root, filename))
return matches
def push_folder(self, local_path):
""" Find all source files first, grab all the folders, batch create folders, push notebooks"""
file_list = self.find_all_file_paths(local_path)
cwd = os.getcwd()
file_list_rel_path = list(self.my_map(lambda x: x.replace(cwd, "."), file_list))
for fname in file_list_rel_path:
self.push_file(fname)
return file_list_rel_path
def push(self, path):
if path[0] == '/':
raise ValueError("Path should be relative to your git repo home dir and start with ./ or with folder name")
if os.path.isfile(path):
self.push_file(path)
else:
self.push_folder(path)
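
# Illustrative library usage (host and credentials are placeholders):
#   client = WorkspaceClient('https://myenv.cloud.databricks.com',
#                            '[email protected]', 'secret-token')
#   client.pull('demo/reddit')    # export notebooks to ./demo/reddit
#   client.push('./demo/reddit')  # import local sources back into Databricks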
if __name__ == '__main__':
debug = False
parser = argparse.ArgumentParser(description="""
Sync Databricks workspace to/from local directory for git support.
e.g.
    $ python workspace.py pull demo/reddit/
    $ python workspace.py push demo/reddit/
    Or
    $ python workspace.py pull --host='https://myenv.cloud.databricks.com/' [email protected] --password=HAHAHA
I personally use the environment variables to store this information
DBC_HOST
DBC_USERNAME
DBC_PASSWORD
DBC_SHARED
DBC_SHARED is set to true if the single repo needs to host multiple home directories.
It creates a local directory from the users e-mail
""")
# subparser for mutually exclusive arguments
sp = parser.add_subparsers(dest='action')
sp_push = sp.add_parser('push', help='Push path to Databricks workspace')
sp_pull = sp.add_parser('pull', help='Pull workspace from Databricks to local directory')
parser.add_argument('--user', dest='user', help='Username for the Databricks env')
parser.add_argument('--password', dest='password', help='Password for the Databricks env')
    parser.add_argument('--host', dest='host', help='Host URL for the Databricks env')
parser.add_argument('--shared', dest='shared', action='store_true',
help='Boolean to notify if this is a \
shared repo to add a username prefix to the directories')
parser.add_argument('path', type=str,
help='The path/directory in Databricks or locally to sync')
args = parser.parse_args()
# the arguments
user = args.user
host = args.host
password = args.password
is_shared = args.shared
if not host:
host = os.environ.get('DBC_HOST')
if not user:
user = os.environ.get('DBC_USERNAME')
if not password:
password = os.environ.get('DBC_PASSWORD')
if not is_shared:
        # bool() of any non-empty string (even 'false') is True, so parse the
        # common truthy spellings explicitly.
        is_shared = os.environ.get('DBC_SHARED', '').lower() in ('1', 'true', 'yes')
helper = WorkspaceClient(host, user, password, is_shared)
if debug:
print("ACTION IS: " + args.action)
print("PATH IS: " + args.path)
print("USER IS: " + user)
print("PASS IS: " + "I_DONT_PRINT_PASSWORDS")
print("HOST IS: " + host)
if args.path is None:
print("Need path")
exit(0)
else:
input_path = args.path
if args.action.lower() == "push":
helper.push(input_path)
elif args.action.lower() == "pull":
helper.pull(input_path)
else:
print("Push / pull are only supported as the action.")
| apache-2.0 | -2,087,793,057,794,017,300 | 39.178808 | 120 | 0.589171 | false | 3.853287 | false | false | false |
tkanemoto/unittest-xml-reporting | xmlrunner/result.py | 1 | 18019 |
import os
import sys
import time
import traceback
import six
import re
from os import path
from six import unichr
from six.moves import StringIO
from .unittest import TestResult, _TextTestResult
# Matches invalid XML1.0 unicode characters, like control characters:
# http://www.w3.org/TR/2006/REC-xml-20060816/#charsets
# http://stackoverflow.com/questions/1707890/fast-way-to-filter-illegal-xml-unicode-chars-in-python
_illegal_unichrs = [
(0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x1F),
(0x7F, 0x84), (0x86, 0x9F),
(0xFDD0, 0xFDDF), (0xFFFE, 0xFFFF),
]
if sys.maxunicode >= 0x10000: # not narrow build
_illegal_unichrs.extend([
(0x1FFFE, 0x1FFFF), (0x2FFFE, 0x2FFFF),
(0x3FFFE, 0x3FFFF), (0x4FFFE, 0x4FFFF),
(0x5FFFE, 0x5FFFF), (0x6FFFE, 0x6FFFF),
(0x7FFFE, 0x7FFFF), (0x8FFFE, 0x8FFFF),
(0x9FFFE, 0x9FFFF), (0xAFFFE, 0xAFFFF),
(0xBFFFE, 0xBFFFF), (0xCFFFE, 0xCFFFF),
(0xDFFFE, 0xDFFFF), (0xEFFFE, 0xEFFFF),
(0xFFFFE, 0xFFFFF), (0x10FFFE, 0x10FFFF),
])
_illegal_ranges = [
"%s-%s" % (unichr(low), unichr(high))
for (low, high) in _illegal_unichrs
]
INVALID_XML_1_0_UNICODE_RE = re.compile(u'[%s]' % u''.join(_illegal_ranges))
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
def xml_safe_unicode(base, encoding='utf-8'):
"""Return a unicode string containing only valid XML characters.
encoding - if base is a byte string it is first decoded to unicode
using this encoding.
"""
if isinstance(base, six.binary_type):
base = base.decode(encoding)
return INVALID_XML_1_0_UNICODE_RE.sub('', base)
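
# For example (illustrative): xml_safe_unicode(u'ok\x00ok') returns u'okok',
# because NUL and the other control characters matched above are not legal
# in XML 1.0 documents.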
def to_unicode(data):
"""Returns unicode in Python2 and str in Python3"""
if six.PY3:
return six.text_type(data)
try:
# Try utf8
return six.text_type(data)
except UnicodeDecodeError:
return repr(data).decode('utf8', 'replace')
def safe_unicode(data, encoding=None):
return xml_safe_unicode(to_unicode(data), encoding)
def testcase_name(test_method):
testcase = type(test_method)
# Ignore module name if it is '__main__'
module = testcase.__module__ + '.'
if module == '__main__.':
module = ''
result = module + testcase.__name__
return result
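
# For example, a test method defined on pkg.tests.MyTest produces
# 'pkg.tests.MyTest'; when the defining module is run as __main__, the
# module prefix is dropped and just 'MyTest' is returned.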
class _TestInfo(object):
"""
This class keeps useful information about the execution of a
test method.
"""
# Possible test outcomes
(SUCCESS, FAILURE, ERROR, SKIP) = range(4)
def __init__(self, test_result, test_method, outcome=SUCCESS, err=None, subTest=None):
self.test_result = test_result
self.outcome = outcome
self.elapsed_time = 0
self.err = err
self.stdout = test_result._stdout_data
self.stderr = test_result._stderr_data
self.test_description = self.test_result.getDescription(test_method)
self.test_exception_info = (
'' if outcome in (self.SUCCESS, self.SKIP)
else self.test_result._exc_info_to_string(
self.err, test_method)
)
self.test_name = testcase_name(test_method)
self.test_id = test_method.id()
if subTest:
self.test_id = subTest.id()
def id(self):
return self.test_id
def test_finished(self):
"""Save info that can only be calculated once a test has run.
"""
self.elapsed_time = \
self.test_result.stop_time - self.test_result.start_time
def get_description(self):
"""
Return a text representation of the test method.
"""
return self.test_description
def get_error_info(self):
"""
Return a text representation of an exception thrown by a test
method.
"""
return self.test_exception_info
class _XMLTestResult(_TextTestResult):
"""
    A test result class that can express test results in an XML report.
Used by XMLTestRunner.
"""
def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1,
elapsed_times=True, properties=None):
_TextTestResult.__init__(self, stream, descriptions, verbosity)
self.buffer = True # we are capturing test output
self._stdout_data = None
self._stderr_data = None
self.successes = []
self.callback = None
self.elapsed_times = elapsed_times
        self.properties = properties  # junit testsuite properties
def _prepare_callback(self, test_info, target_list, verbose_str,
short_str):
"""
Appends a _TestInfo to the given target list and sets a callback
method to be called by stopTest method.
"""
target_list.append(test_info)
def callback():
"""Prints the test method outcome to the stream, as well as
the elapsed time.
"""
test_info.test_finished()
# Ignore the elapsed times for a more reliable unit testing
if not self.elapsed_times:
self.start_time = self.stop_time = 0
if self.showAll:
self.stream.writeln(
'%s (%.3fs)' % (verbose_str, test_info.elapsed_time)
)
elif self.dots:
self.stream.write(short_str)
self.callback = callback
def startTest(self, test):
"""
Called before execute each test method.
"""
self.start_time = time.time()
TestResult.startTest(self, test)
if self.showAll:
self.stream.write(' ' + self.getDescription(test))
self.stream.write(" ... ")
def _save_output_data(self):
        # Only try to read sys.stdout and sys.stderr, as they may not be
        # StringIO objects yet, e.g. when a test fails during __call__
try:
self._stdout_data = sys.stdout.getvalue()
self._stderr_data = sys.stderr.getvalue()
except AttributeError:
pass
def stopTest(self, test):
"""
Called after execute each test method.
"""
self._save_output_data()
# self._stdout_data = sys.stdout.getvalue()
# self._stderr_data = sys.stderr.getvalue()
_TextTestResult.stopTest(self, test)
self.stop_time = time.time()
if self.callback and callable(self.callback):
self.callback()
self.callback = None
def addSuccess(self, test):
"""
Called when a test executes successfully.
"""
self._save_output_data()
self._prepare_callback(
_TestInfo(self, test), self.successes, 'OK', '.'
)
def addFailure(self, test, err):
"""
Called when a test method fails.
"""
self._save_output_data()
testinfo = _TestInfo(self, test, _TestInfo.FAILURE, err)
self.failures.append((
testinfo,
self._exc_info_to_string(err, test)
))
self._prepare_callback(testinfo, [], 'FAIL', 'F')
def addError(self, test, err):
"""
Called when a test method raises an error.
"""
self._save_output_data()
testinfo = _TestInfo(self, test, _TestInfo.ERROR, err)
self.errors.append((
testinfo,
self._exc_info_to_string(err, test)
))
self._prepare_callback(testinfo, [], 'ERROR', 'E')
def addSubTest(self, testcase, test, err):
"""
Called when a subTest method raises an error.
"""
if err is not None:
self._save_output_data()
testinfo = _TestInfo(self, testcase, _TestInfo.ERROR, err, subTest=test)
self.errors.append((
testinfo,
self._exc_info_to_string(err, testcase)
))
self._prepare_callback(testinfo, [], 'ERROR', 'E')
def addSkip(self, test, reason):
"""
Called when a test method was skipped.
"""
self._save_output_data()
testinfo = _TestInfo(self, test, _TestInfo.SKIP, reason)
self.skipped.append((testinfo, reason))
self._prepare_callback(testinfo, [], 'SKIP', 'S')
def printErrorList(self, flavour, errors):
"""
Writes information about the FAIL or ERROR to the stream.
"""
for test_info, error in errors:
self.stream.writeln(self.separator1)
self.stream.writeln(
'%s [%.3fs]: %s' % (flavour, test_info.elapsed_time,
test_info.get_description())
)
self.stream.writeln(self.separator2)
self.stream.writeln('%s' % test_info.get_error_info())
def _get_info_by_testcase(self):
"""
Organizes test results by TestCase module. This information is
        used during the report generation, where an XML report will be created
for each TestCase.
"""
tests_by_testcase = {}
for tests in (self.successes, self.failures, self.errors,
self.skipped):
for test_info in tests:
if isinstance(test_info, tuple):
# This is a skipped, error or a failure test case
test_info = test_info[0]
testcase_name = test_info.test_name
if testcase_name not in tests_by_testcase:
tests_by_testcase[testcase_name] = []
tests_by_testcase[testcase_name].append(test_info)
return tests_by_testcase
def _report_testsuite_properties(xml_testsuite, xml_document, properties):
xml_properties = xml_document.createElement('properties')
xml_testsuite.appendChild(xml_properties)
if properties:
for key, value in properties.items():
prop = xml_document.createElement('property')
prop.setAttribute('name', str(key))
prop.setAttribute('value', str(value))
xml_properties.appendChild(prop)
return xml_properties
_report_testsuite_properties = staticmethod(_report_testsuite_properties)
def _report_testsuite(suite_name, tests, xml_document, parentElement,
properties):
"""
Appends the testsuite section to the XML document.
"""
testsuite = xml_document.createElement('testsuite')
parentElement.appendChild(testsuite)
testsuite.setAttribute('name', suite_name)
testsuite.setAttribute('tests', str(len(tests)))
testsuite.setAttribute(
'time', '%.3f' % sum(map(lambda e: e.elapsed_time, tests))
)
failures = filter(lambda e: e.outcome == _TestInfo.FAILURE, tests)
testsuite.setAttribute('failures', str(len(list(failures))))
errors = filter(lambda e: e.outcome == _TestInfo.ERROR, tests)
testsuite.setAttribute('errors', str(len(list(errors))))
_XMLTestResult._report_testsuite_properties(
testsuite, xml_document, properties)
systemout = xml_document.createElement('system-out')
testsuite.appendChild(systemout)
stdout = StringIO()
for test in tests:
# Merge the stdout from the tests in a class
if test.stdout is not None:
stdout.write(test.stdout)
_XMLTestResult._createCDATAsections(
xml_document, systemout, stdout.getvalue())
systemerr = xml_document.createElement('system-err')
testsuite.appendChild(systemerr)
stderr = StringIO()
for test in tests:
# Merge the stderr from the tests in a class
if test.stderr is not None:
stderr.write(test.stderr)
_XMLTestResult._createCDATAsections(
xml_document, systemerr, stderr.getvalue())
return testsuite
_report_testsuite = staticmethod(_report_testsuite)
def _test_method_name(test_id):
"""
Returns the test method name.
"""
return test_id.split('.')[-1]
_test_method_name = staticmethod(_test_method_name)
def _createCDATAsections(xmldoc, node, text):
text = safe_unicode(text)
pos = text.find(']]>')
while pos >= 0:
tmp = text[0:pos+2]
cdata = xmldoc.createCDATASection(tmp)
node.appendChild(cdata)
text = text[pos+2:]
pos = text.find(']]>')
cdata = xmldoc.createCDATASection(text)
node.appendChild(cdata)
_createCDATAsections = staticmethod(_createCDATAsections)
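
    # Note: a CDATA section cannot contain the literal ']]>' sequence, so
    # _createCDATAsections splits the text at each occurrence and emits
    # several adjacent CDATA nodes instead of a single one.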
def _report_testcase(suite_name, test_result, xml_testsuite, xml_document):
"""
Appends a testcase section to the XML document.
"""
testcase = xml_document.createElement('testcase')
xml_testsuite.appendChild(testcase)
testcase.setAttribute('classname', suite_name)
testcase.setAttribute(
'name', _XMLTestResult._test_method_name(test_result.test_id)
)
testcase.setAttribute('time', '%.3f' % test_result.elapsed_time)
if (test_result.outcome != _TestInfo.SUCCESS):
elem_name = ('failure', 'error', 'skipped')[test_result.outcome-1]
failure = xml_document.createElement(elem_name)
testcase.appendChild(failure)
if test_result.outcome != _TestInfo.SKIP:
failure.setAttribute(
'type',
safe_unicode(test_result.err[0].__name__)
)
failure.setAttribute(
'message',
safe_unicode(test_result.err[1])
)
error_info = safe_unicode(test_result.get_error_info())
_XMLTestResult._createCDATAsections(
xml_document, failure, error_info)
else:
failure.setAttribute('type', 'skip')
failure.setAttribute('message', safe_unicode(test_result.err))
_report_testcase = staticmethod(_report_testcase)
def generate_reports(self, test_runner):
"""
Generates the XML reports to a given XMLTestRunner object.
"""
from xml.dom.minidom import Document
all_results = self._get_info_by_testcase()
outputHandledAsString = \
isinstance(test_runner.output, six.string_types)
if (outputHandledAsString and not os.path.exists(test_runner.output)):
os.makedirs(test_runner.output)
if not outputHandledAsString:
doc = Document()
testsuite = doc.createElement('testsuites')
doc.appendChild(testsuite)
parentElement = testsuite
for suite, tests in all_results.items():
if outputHandledAsString:
doc = Document()
parentElement = doc
suite_name = suite
if test_runner.outsuffix:
# not checking with 'is not None', empty means no suffix.
suite_name = '%s-%s' % (suite, test_runner.outsuffix)
# Build the XML file
testsuite = _XMLTestResult._report_testsuite(
suite_name, tests, doc, parentElement, self.properties
)
for test in tests:
_XMLTestResult._report_testcase(suite, test, testsuite, doc)
xml_content = doc.toprettyxml(
indent='\t',
encoding=test_runner.encoding
)
if outputHandledAsString:
filename = path.join(
test_runner.output,
'TEST-%s.xml' % suite_name)
with open(filename, 'wb') as report_file:
report_file.write(xml_content)
if not outputHandledAsString:
# Assume that test_runner.output is a stream
test_runner.output.write(xml_content)
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
if six.PY3:
# It works fine in python 3
try:
return super(_XMLTestResult, self)._exc_info_to_string(
err, test)
except AttributeError:
# We keep going using the legacy python <= 2 way
pass
# This comes directly from python2 unittest
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
            # Only try to read sys.stdout and sys.stderr, as they may not be
            # StringIO objects yet, e.g. when a test fails during __call__
try:
output = sys.stdout.getvalue()
except AttributeError:
output = None
try:
error = sys.stderr.getvalue()
except AttributeError:
error = None
if output:
if not output.endswith('\n'):
output += '\n'
msgLines.append(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
msgLines.append(STDERR_LINE % error)
# This is the extra magic to make sure all lines are str
encoding = getattr(sys.stdout, 'encoding', 'utf-8')
lines = []
for line in msgLines:
if not isinstance(line, str):
                # utf-8 shouldn't be hard-coded here, but the right fallback
                # encoding is unclear
line = line.encode(encoding)
lines.append(line)
return ''.join(lines)
| bsd-2-clause | 3,852,937,358,608,100,400 | 33.191651 | 99 | 0.571896 | false | 4.121455 | true | false | false |
ask/kamqp | kamqp/client_0_8/basic_message.py | 1 | 3707 | # Copyright (C) 2007-2008 Barry Pederson <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
from __future__ import absolute_import
from .serialization import GenericContent
__all__ = ["Message"]
class Message(GenericContent):
"""A Message for use with the ``Channnel.basic_*`` methods.
:param body: string
:param children: (not supported)
Keyword properties may include:
:keyword content_type: shortstr
MIME content type
:keyword content_encoding: shortstr
MIME content encoding
:keyword application_headers: table
Message header field table, a dict with string keys,
and string | int | Decimal | datetime | dict values.
:keyword delivery_mode: octet
Non-persistent (1) or persistent (2)
:keyword priority: octet
The message priority, 0 to 9
:keyword correlation_id: shortstr
The application correlation identifier
:keyword reply_to: shortstr
The destination to reply to
:keyword expiration: shortstr
Message expiration specification
:keyword message_id: shortstr
The application message identifier
:keyword timestamp: datetime.datetime
The message timestamp
:keyword type: shortstr
The message type name
:keyword user_id: shortstr
The creating user id
:keyword app_id: shortstr
The creating application id
:keyword cluster_id: shortstr
Intra-cluster routing identifier
Unicode bodies are encoded according to the ``content_encoding``
argument. If that's None, it's set to 'UTF-8' automatically.
*Example*:
.. code-block:: python
msg = Message('hello world',
content_type='text/plain',
application_headers={'foo': 7})
"""
#: Instances of this class have these attributes, which
#: are passed back and forth as message properties between
#: client and server
PROPERTIES = [
("content_type", "shortstr"),
("content_encoding", "shortstr"),
("application_headers", "table"),
("delivery_mode", "octet"),
("priority", "octet"),
("correlation_id", "shortstr"),
("reply_to", "shortstr"),
("expiration", "shortstr"),
("message_id", "shortstr"),
("timestamp", "timestamp"),
("type", "shortstr"),
("user_id", "shortstr"),
("app_id", "shortstr"),
("cluster_id", "shortstr")]
def __init__(self, body='', children=None, **properties):
super(Message, self).__init__(**properties)
self.body = body
def __eq__(self, other):
"""Check if the properties and bodies of this message and another
message are the same.
Received messages may contain a :attr:`delivery_info` attribute,
which isn't compared.
"""
return (super(Message, self).__eq__(other) and
hasattr(other, 'body') and
self.body == other.body)
| lgpl-2.1 | -81,401,606,623,979,250 | 29.385246 | 75 | 0.642029 | false | 4.45018 | false | false | false |
openearth/aeolis-python | aeolis/gridparams.py | 1 | 6901 | '''This file is part of AeoLiS.
AeoLiS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
AeoLiS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with AeoLiS. If not, see <http://www.gnu.org/licenses/>.
AeoLiS Copyright (C) 2015 Bas Hoonhout
[email protected] [email protected]
Deltares Delft University of Technology
Unit of Hydraulic Engineering Faculty of Civil Engineering and Geosciences
Boussinesqweg 1 Stevinweg 1
2629 HVDelft 2628CN Delft
The Netherlands The Netherlands
'''
from __future__ import absolute_import, division
import logging
import numpy as np
# package modules
#from aeolis.utils import *
# initialize logger
logger = logging.getLogger(__name__)
def initialize(s, p):
    '''Initialize the curvilinear computational grid.

    Computes, from the input x- and y-grids, the world coordinates of the
    staggered grid points (z-, u-, v- and c-points), the grid distances in
    s- and n-direction at each point type, the cell areas and their
    inverses, and the local grid orientations (alfaz, alfau and alfav).

Parameters
----------
s : dict
Spatial grids
p : dict
Model configuration parameters
Returns
-------
dict
Spatial grids
'''
# initialize x-dimensions
s['x'][:,:] = p['xgrid_file']
# World coordinates of z-points
s['xz'][:,:] = s['x'][:,:]
# World coordinates of u-points
s['xu'][:,1:] = 0.5 * (s['xz'][:,:-1] + s['xz'][:,1:])
s['xu'][:,0] = 1.5 * s['xz'][:,0] - 0.5 * s['xz'][:,1]
# World coordinates of v-points
s['xv'][1:,:] = 0.5 * (s['xz'][:-1,:] + s['xz'][1:,:])
s['xv'][0,:] = 1.5 * s['xz'][0,:] - 0.5 * s['xz'][1,:]
# World coordinates of c-points
s['xc'][1:,1:] = 0.25 *(s['xz'][:-1,:-1] + s['xz'][:-1,1:] + s['xz'][1:,:-1] + s['xz'][1:,1:])
s['xc'][1:,0] = 0.5 * (s['xu'][:-1,0] + s['xu'][1:,0])
s['xc'][0,1:] = 0.5 * (s['xv'][0,:-1] + s['xv'][0,1:])
s['xc'][0,0] = s['xu'][0,0]
# initialize y-dimension
ny = p['ny']
if ny == 0:
s['y'][:,:] = 0.
s['yz'][:,:] = 0.
s['yu'][:,:] = 0.
s['yv'][:,:] = 0.
s['dnz'][:,:] = 1.
s['dnu'][:,:] = 1.
s['dnv'][:,:] = 1.
s['dnc'][:,:] = 1.
s['alfaz'][:,:] = 0.
else:
# initialize y-dimensions
s['y'][:,:] = p['ygrid_file']
# World coordinates of z-points
s['yz'][:,:] = s['y'][:,:] # Different from XBeach
# World coordinates of u-points
s['yu'][:,1:] = 0.5 * (s['yz'][:,:-1] + s['yz'][:,1:])
s['yu'][:,0] = 1.5 * s['yz'][:,0] - 0.5 * s['yz'][:,1]
# World coordinates of v-points
s['yv'][1:,:] = 0.5 * (s['yz'][:-1,:] + s['yz'][1:,:])
s['yv'][0,:] = 1.5 * s['yz'][0,:] - 0.5 * s['yz'][1,:]
# World coordinates of c-points
s['yc'][1:,1:] = 0.25 *(s['yz'][:-1,:-1] + s['yz'][:-1,1:] + s['yz'][1:,:-1] + s['yz'][1:,1:])
s['yc'][0,1:] = 0.5 * (s['yv'][0,:-1] + s['yv'][0,1:])
s['yc'][1:,0] = 0.5 * (s['yu'][:-1,0] + s['yu'][1:,0])
s['yc'][0,0] = s['yv'][0,0]
# Distances in n-direction
s['dnz'][:-1,:] = ((s['yv'][:-1,:]-s['yv'][1:,:])**2.+(s['xv'][:-1,:]-s['xv'][1:,:])**2.)**0.5
s['dnu'][1:,:] = ((s['xc'][:-1,:]-s['xc'][1:,:])**2.+(s['yc'][:-1,:]-s['yc'][1:,:])**2.)**0.5
s['dnv'][1:,:] = ((s['xz'][:-1,:]-s['xz'][1:,:])**2.+(s['yz'][:-1,:]-s['yz'][1:,:])**2.)**0.5
s['dnc'][1:,:] = ((s['xu'][:-1,:]-s['xu'][1:,:])**2.+(s['yu'][:-1,:]-s['yu'][1:,:])**2.)**0.5
s['dnz'][-1,:] = s['dnz'][-2,:]
s['dnu'][0,:] = s['dnu'][1,:]
s['dnv'][0,:] = s['dnv'][1,:]
s['dnc'][0,:] = s['dnc'][1,:]
# Distances in s-direction
s['dsz'][:,:-1] = ((s['xu'][:,:-1]-s['xu'][:,1:])**2.+(s['yu'][:,:-1]-s['yu'][:,1:])**2.)**0.5
s['dsu'][:,1:] = ((s['xz'][:,:-1]-s['xz'][:,1:])**2.+(s['yz'][:,:-1]-s['yz'][:,1:])**2.)**0.5
s['dsv'][:,1:] = ((s['xc'][:,:-1]-s['xc'][:,1:])**2.+(s['yc'][:,:-1]-s['yc'][:,1:])**2.)**0.5
s['dsc'][:,1:] = ((s['xv'][:,:-1]-s['xv'][:,1:])**2.+(s['yv'][:,:-1]-s['yv'][:,1:])**2.)**0.5
s['dsz'][:,-1] = s['dsz'][:,-2]
s['dsu'][:,0] = s['dsu'][:,1]
s['dsv'][:,0] = s['dsv'][:,1]
s['dsc'][:,0] = s['dsc'][:,1]
# # Distances diagonal in sn-direction (a)
# s['dsnca'][1:,1:] = ((s['xz'][:-1,:-1]-s['xz'][1:,1:])**2.+(s['yz'][:-1,:-1]-s['yz'][1:,1:])**2.)**0.5
# s['dsnca'][0,:] = s['dsnza'][1,:]
# s['dsnca'][:,0] = s['dsnza'][:,1]
# s['dsnca'][0,0] = s['dsnza'][1,1]
#
# # Distances diagonal in sn-direction (a)
# s['dsncb'][1:,1:] = ((s['xz'][:-1,:-1]-s['xz'][1:,1:])**2.+(s['yz'][:-1,:-1]-s['yz'][1:,1:])**2.)**0.5
# s['dsncb'][0,:] = s['dsnzb'][1,:]
# s['dsncb'][:,0] = s['dsnzb'][:,1]
# s['dsncb'][0,0] = s['dsnzb'][1,1]
# Cell areas
# s['dsdnu'][:-1,:-1] = (0.5*(s['dsc'][:-1,:-1]+s['dsc'][1:,:-1])) * (0.5*(s['dnz'][:-1,:-1]+s['dnz'][:-1,1:]))
# s['dsdnv'][:-1,:-1] = (0.5*(s['dsz'][:-1,:-1]+s['dsz'][1:,:-1])) * (0.5*(s['dnc'][:-1,:-1]+s['dnc'][:-1,1:]))
s['dsdnz'][:-1,:-1] = (0.5*(s['dsv'][:-1,:-1]+s['dsv'][1:,:-1])) * (0.5*(s['dnu'][:-1,:-1]+s['dnu'][:-1,1:]))
# s['dsdnu'][:-1,-1] = s['dsdnu'][:-1,-2]
# s['dsdnv'][:-1,-1] = s['dsdnv'][:-1,-2]
s['dsdnz'][:-1,-1] = s['dsdnz'][:-1,-2]
# s['dsdnu'][-1,:] = s['dsdnu'][-2,:]
# s['dsdnv'][-1,:] = s['dsdnv'][-2,:]
s['dsdnz'][-1,:] = s['dsdnz'][-2,:]
# Inverse cell areas
# s['dsdnui'][:,:] = 1. / s['dsdnu']
# s['dsdnvi'][:,:] = 1. / s['dsdnv']
s['dsdnzi'][:,:] = 1. / s['dsdnz']
# Alfaz, grid orientation in z-points
s['alfaz'][:-1,:] = np.arctan2(s['yu'][1:,:] - s['yu'][:-1,:], s['xu'][1:,:] - s['xu'][:-1,:])
s['alfaz'][-1,:] = s['alfaz'][-2,:]
# Alfau, grid orientation in u-points
s['alfau'][1:,:] = np.arctan2(s['yz'][1:,:] - s['yz'][:-1,:], s['xz'][1:,:] - s['xz'][:-1,:])
s['alfau'][0,:] = s['alfau'][1,:]
# Alfav, grid orientation in v-points
s['alfav'][:-1,:] = np.arctan2(s['yc'][1:,:] - s['yc'][:-1,:], s['xc'][1:,:] - s['xc'][:-1,:])
s['alfav'][-1,:] = s['alfav'][-2,:]
# print(np.rad2deg(s['alfaz']))
# print(np.rad2deg(s['alfau']))
# print(np.rad2deg(s['alfav']))
# print(s['sz'][:,:])
# print(s['nz'][:,:])
# print(s['sv'][:,:])
# print(s['sc'][:,:])
# print(s['dsz'][:,:])
# print(s['dsu'][:,:])
# print(s['dsv'][:,:])
# print(s['dsc'][:,:])
# print(s['dsdnz'][:,:])
# print(s['dsdnu'][:,:])
    return s
| gpl-3.0 | -2,599,561,361,544,794,000 | 34.947917 | 114 | 0.409941 | false | 2.421404 | false | false | false
sony/nnabla | python/src/nnabla/utils/nnp_graph.py | 1 | 10867 | # Copyright 2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from collections import OrderedDict
import os
import weakref
import numpy as np
import itertools
import nnabla as nn
import nnabla.function as F
class NnpNetwork(object):
'''A graph object which is read from nnp file.
An instance of NnpNetwork is usually created by an NnpLoader instance.
See an example usage described in :obj:`NnpLoader`.
Attributes:
variables (dict): A dict of all variables in a created graph
with a variable name as a key, and a nnabla.Variable as a value.
inputs (dict): All input variables.
outputs (dict): All output variables.
'''
def __init__(self, proto_network, batch_size, callback):
proto_network = proto_network.expand_loop_control()
self.proto_network = proto_network.promote(callback)
self.proto_network(batch_size=batch_size)
for k, v in itertools.chain(
self.proto_network.variables.items(), self.proto_network.parameters.items()):
v.variable_instance.name = k
self._inputs = {
i: self.proto_network.variables[i].variable_instance
for i in self.proto_network.inputs
}
self._outputs = {
i: self.proto_network.variables[i].variable_instance
for i in self.proto_network.outputs
}
self._variables = {
k: v.variable_instance
for k, v in itertools.chain(
self.proto_network.variables.items(), self.proto_network.parameters.items())
}
# publish network's parameters to current parameter scope
# like original implementation.
with nn.parameter_scope('', nn.get_current_parameter_scope()):
for k, v in self.proto_network.parameters.items():
nn.parameter.set_parameter(k, v.variable_instance)
@property
def inputs(self):
return self._inputs
@property
def outputs(self):
return self._outputs
@property
def variables(self):
return self._variables
class NnpLoader(object):
'''An NNP file loader.
Args:
filepath : file-like object or filepath.
extension: if filepath is file-like object, extension is one of ".nnp", ".nntxt", ".prototxt".
Example:
.. code-block:: python
from nnabla.utils.nnp_graph import NnpLoader
# Read a .nnp file.
nnp = NnpLoader('/path/to/nnp.nnp')
# Assume a graph `graph_a` is in the nnp file.
net = nnp.get_network(network_name, batch_size=1)
# `x` is an input of the graph.
x = net.inputs['x']
# 'y' is an outputs of the graph.
y = net.outputs['y']
# Set random data as input and perform forward prop.
x.d = np.random.randn(*x.shape)
y.forward(clear_buffer=True)
print('output:', y.d)
'''
def __init__(self, filepath, scope=None, extension=".nntxt"):
# OrderedDict maintains loaded parameters from nnp files.
# The loaded parameters will be copied to the current
# scope when get_network is called.
self._params = scope if scope else OrderedDict()
self.g = nn.graph_def.load(
filepath, parameter_scope=self._params, rng=np.random.RandomState(1223), extension=extension)
self.network_dict = {
name: pn for name, pn in self.g.networks.items()
}
def get_network_names(self):
'''Returns network names available.
'''
return list(self.network_dict.keys())
def get_network(self, name, batch_size=None, callback=None):
        '''Create a variable graph for the network specified by name.
Returns: NnpNetwork
'''
return NnpNetwork(self.network_dict[name], batch_size, callback=callback)
class NnpNetworkPass(object):
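    """A collection of user callbacks applied while an NNP network is
    instantiated: drop or rewire functions by name or type, replace input
    variables, force or check global average pooling, fix parameters, or
    stop graph construction at given variables. An instance is intended to
    be passed as the ``callback`` argument of :meth:`NnpLoader.get_network`.
    """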
def _no_verbose(self, *a, **kw):
pass
def _verbose(self, *a, **kw):
print(*a, **kw)
def __init__(self, verbose=0):
self._variable_callbacks = {}
self._function_callbacks_by_name = {}
self._function_callbacks_by_type = {}
self._passes_by_name = {}
self._passes_by_type = {}
self._fix_parameters = False
self._use_up_to_variables = set()
self.verbose = self._no_verbose
self.verbose2 = self._no_verbose
if verbose:
self.verbose = self._verbose
if verbose > 1:
self.verbose2 = self._verbose
def on_function_pass_by_name(self, name):
def _on_function_pass_by_name(callback):
def _callback(f, variables, param_scope):
return callback(f, variables, param_scope)
self._passes_by_name[name] = _callback
return _callback
return _on_function_pass_by_name
def on_function_pass_by_type(self, name):
def _on_function_pass_by_type(callback):
def _callback(f, variables, param_scope):
return callback(f, variables, param_scope)
self._passes_by_name[name] = _callback
return _callback
return _on_function_pass_by_type
def on_generate_variable(self, name):
def _on_generate_variable(callback):
def _callback(v):
return callback(v)
self._variable_callbacks[name] = _callback
return _callback
return _on_generate_variable
def on_generate_function_by_name(self, name):
def _on_generate_function_by_name(callback):
def _callback(v):
return callback(v)
self._function_callbacks_by_name[name] = _callback
return _callback
return _on_generate_function_by_name
def on_generate_function_by_type(self, name):
def _on_generate_function_by_type(callback):
def _callback(v):
return callback(v)
self._function_callbacks_by_type[name] = _callback
return _callback
return _on_generate_function_by_type
def drop_function(self, *names):
def callback(f, variables, param_scope):
self.verbose('Pass: Deleting {}.'.format(f.name))
f.disable()
for name in names:
self.on_function_pass_by_name(name)(callback)
def fix_parameters(self):
self._fix_parameters = True
def use_up_to(self, *names):
self._use_up_to_variables.update(set(names))
def remove_and_rewire(self, name, i=0, o=0):
@self.on_function_pass_by_name(name)
def on_dr(f, variables, param_scope):
fi = f.inputs[i]
fo = f.outputs[o]
self.verbose('Removing {} and rewire input={} and output={}.'.format(
f.name, fi.name, fo.name))
fo.rewire_on(fi)
# Use input name
fo.proto.name = fi.name
def set_variable(self, name, input_var):
@self.on_generate_variable(name)
def on_input_x(v):
self.verbose('Replace {} by {}.'.format(name, input_var))
v.proto.shape.dim[:] = input_var.shape
v.variable = input_var
input_var.name = v.name
return v
def force_average_pooling_global(self, name, by_type=False):
dec = self.on_generate_function_by_name
if by_type:
dec = self.on_generate_function_by_type
@dec(name)
def on_avgpool(f):
pool_shape = f.inputs[0].proto.shape.dim[2:]
self.verbose('Change strides of {} to {}.'.format(
f.name, pool_shape))
p = f.proto.average_pooling_param
p.kernel.dim[:] = pool_shape
p.stride.dim[:] = pool_shape
return f
def check_average_pooling_global(self, name, by_type=False):
dec = self.on_generate_function_by_name
if by_type:
dec = self.on_generate_function_by_type
@dec(name)
def on_avgpool_check(f):
pool_shape = f.inputs[0].proto.shape.dim[2:]
p = f.proto.average_pooling_param
if p.kernel.dim[:] != pool_shape or p.stride.dim[:] != pool_shape:
raise ValueError(
'Stride configuration of average pooling is not for global pooling.'
' Given Image shape is {}, whereas pooling window size is {} and its stride is {}.'
' Consider using force_global_pooling=True'.format(
pool_shape, p.kernel.dim[:], p.stride.dim[:]))
return f
def set_batch_normalization_batch_stat_all(self, batch_stat):
@self.on_generate_function_by_type('BatchNormalization')
def on_bn(f):
self.verbose('Setting batch_stat={} at {}.'.format(
batch_stat, f.name))
p = f.proto.batch_normalization_param
p.batch_stat = batch_stat
return f
def _apply_function_pass_by_name(self, f, variables, param_scope):
if f.name not in self._passes_by_name:
return f
return self._passes_by_name[f.name](f, variables, param_scope)
def _apply_function_pass_by_type(self, f, variables, param_scope):
if f.proto.type not in self._passes_by_type:
return f
return self._passes_by_type[f.proto.type](f, variables, param_scope)
def _apply_generate_variable(self, v):
if v.name in self._variable_callbacks:
v = self._variable_callbacks[v.name](v)
if self._fix_parameters:
v.need_grad = False
return v
def _apply_generate_function_by_name(self, f):
if f.name not in self._function_callbacks_by_name:
return f
return self._function_callbacks_by_name[f.name](f)
def _apply_generate_function_by_type(self, f):
if f.proto.type not in self._function_callbacks_by_type:
return f
return self._function_callbacks_by_type[f.proto.type](f)
def _apply_use_up_to(self, variables):
for v in variables:
if v.name in self._use_up_to_variables:
self.verbose('Stopping at {}.'.format(v.name))
v.stop = True
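
# Illustrative combination of NnpLoader and NnpNetworkPass (the file and
# network/function names below are assumptions):
#   on_load = NnpNetworkPass(verbose=1)
#   on_load.drop_function('Dropout')   # delete functions named 'Dropout'
#   on_load.fix_parameters()
#   nnp = NnpLoader('/path/to/model.nnp')
#   net = nnp.get_network('net1', batch_size=1, callback=on_load)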
| apache-2.0 | 1,661,325,578,871,901,400 | 34.168285 | 105 | 0.594092 | false | 3.886624 | false | false | false |
eduardoneira/SistemasDistribuidos_TPFinal | CentroMonitoreoCiudad/FaceRecognizer/modules/old_feature_matcher.py | 1 | 4628 | #!/bin/python3
import numpy as np
import cv2
import base64
from matplotlib import pyplot as plt
class FeatureMatcher:
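  """Compare two images using OpenCV local features (SURF, SIFT or ORB)
  matched with FLANN; images count as similar when enough matches pass
  Lowe's ratio test (see are_similar).

  Illustrative usage (file names are assumptions)::

    matcher = FeatureMatcher('SIFT')
    a = cv2.imread('face_a.png', 0)
    b = cv2.imread('face_b.png', 0)
    print(matcher.are_similar(a, b))
  """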
__PORC_DISTANCE = 0.7
def __init__(self,feature_extractor='SURF',upright=True,min_match_count=10,threshold=400):
self.MIN_MATCH_COUNT = min_match_count
self.__create_feature_extractor(feature_extractor,upright,threshold)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 200)
self.flann = cv2.FlannBasedMatcher(index_params, search_params)
def __create_feature_extractor(self,feature_extractor,upright,threshold):
if feature_extractor == 'SURF':
self.feature_finder = cv2.xfeatures2d.SURF_create(threshold,extended=True)
self.feature_finder.setUpright(upright)
elif feature_extractor == 'SIFT':
self.feature_finder = cv2.xfeatures2d.SIFT_create(edgeThreshold=20,sigma=1.1)
elif feature_extractor == 'ORB':
self.feature_finder = cv2.ORB_create()
else:
      raise ValueError('Feature extractor not found')
def compare(self,img1,img2):
self.features_img1 = self.find_features(img1)
self.features_img2 = self.find_features(img2)
return self.flann.knnMatch(self.features_img1[1],self.features_img2[1],k=2)
def compare_base64(self,image1_base64,image2_base64):
img1 = self.base64_to_img(image1_base64)
img2 = self.base64_to_img(image2_base64)
return self.compare(img1,img2)
def are_similar(self,img1,img2):
self.good_matches = []
for m,n in self.compare(img1,img2):
if m.distance < self.__PORC_DISTANCE*n.distance:
self.good_matches.append(m)
return (len(self.good_matches) > self.MIN_MATCH_COUNT)
def find_features(self,img):
return self.feature_finder.detectAndCompute(img,None)
def bytes_to_img(self,image_bytes):
nparr = np.fromstring(image_bytes, np.uint8)
return cv2.imdecode(nparr, 0)
def base64_to_img(self,image_base64):
return self.bytes_to_img(base64.b64decode(image_base64))
def compare_and_draw_base64(self,img1,img2):
self.compare_and_draw(self.base64_to_img(img1),self.base64_to_img(img2))
def compare_and_draw(self,img1,img2):
# if self.are_similar(img1,img2):
# src_pts = np.float32([ self.features_img1[0][m.queryIdx].pt for m in self.good_matches ]).reshape(-1,1,2)
# dst_pts = np.float32([ self.features_img2[0][m.trainIdx].pt for m in self.good_matches ]).reshape(-1,1,2)
# M, mask = cv2.findHomography(src_pts,dst_pts,cv2.RANSAC,5.0)
# matchesMask = mask.ravel().tolist()
# h,w = img1.shape
# pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
# dst = cv2.perspectiveTransform(pts,M)
# img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3,cv2.LINE_AA)
# else:
# print("Not enough matches are found - %d/%d" % (len(self.good_matches),self.MIN_MATCH_COUNT))
# matchesMask = None
# draw_params = dict(matchColor = (0,255,0),
# singlePointColor = (255,0,0),
# matchesMask = matchesMask,
# flags = 2)
# img3 = cv2.drawMatchesKnn(img1,self.features_img1[0],img2,self.features_img2[0],self.good_matches,None,**draw_params)
# plt.imshow(img3,'gray'),plt.show()
hash1 = self.find_features(img1)
hash2 = self.find_features(img2)
matches = self.flann.knnMatch(hash1[1],hash2[1],k=2)
good = []
for m,n in matches:
if m.distance < 0.95*n.distance:
good.append(m)
print(len(good))
if len(good)>self.MIN_MATCH_COUNT:
src_pts = np.float32([ hash1[0][m.queryIdx].pt for m in good ]).reshape(-1,1,2)
dst_pts = np.float32([ hash2[0][m.trainIdx].pt for m in good ]).reshape(-1,1,2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
matchesMask = mask.ravel().tolist()
h,w = img1.shape
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts,M)
img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA)
else:
print( "Not enough matches are found - {}/{}".format(len(good), self.MIN_MATCH_COUNT) )
matchesMask = None
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
singlePointColor = (255,0,0),
matchesMask = matchesMask, # draw only inliers
flags = 2)
img3 = cv2.drawMatches(img1,hash1[0],img2,hash2[0],good,None,**draw_params)
    plt.imshow(img3, 'gray'),plt.show()
| gpl-3.0 | 3,252,840,999,377,199,600 | 36.942623 | 123 | 0.645635 | false | 2.848 | false | false | false
mpdehaan/camp | camp/core/scale.py | 1 | 3292 | """
Copyright 2016, Michael DeHaan <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# https://en.wikipedia.org/wiki/List_of_musical_scales_and_modes
SCALE_TYPES = dict(
major = [ 1, 2, 3, 4, 5, 6, 7 ],
natural_minor = [ 1, 2, 'b3', 4, 5, 'b6', 'b7' ],
blues = [ 1, 'b3', 4, 'b5', 5, 'b7' ],
dorian = [ 1, 2, 'b3', 4, 5, 6, 'b7' ],
chromatic = [ 1, 'b2', 2, 'b3', 3, 4, 'b5', 5, 'b6', 6, 'b7', 7 ],
harmonic_major = [ 1, 2, 3, 4, 5, 'b6', 7 ],
    harmonic_minor = [ 1, 2, 'b3', 4, 5, 'b6', 7 ],
locrian = [ 1, 'b2', 'b3', 4, 'b5', 'b6', 'b7' ],
    lydian = [ 1, 2, 3, 'b5', 5, 6, 7 ],  # raised 4th written enharmonically as b5
major_pentatonic = [ 1, 2, 3, 5, 6 ],
    melodic_minor_asc = [ 1, 2, 'b3', 4, 5, 6, 7, 8 ],
melodic_minor_desc = [ 1, 2, 'b3', 4, 5, 'b6', 'b7', 8 ],
minor_pentatonic = [ 1, 'b3', 4, 5, 'b7' ],
mixolydian = [ 1, 2, 3, 4, 5, 6, 'b7' ],
    phrygian = [ 1, 'b2', 'b3', 4, 5, 'b6', 'b7' ],
)
SCALE_ALIASES = dict(
aeolian = 'natural_minor',
ionian = 'major',
minor = 'natural_minor'
)
from camp.core.note import note
class Scale(object):
def __init__(self, root=None, typ=None):
"""
Constructs a scale:
scale = Scale(root='C4', typ='major')
"""
assert root is not None
assert typ is not None
if isinstance(root, str):
root = note(root)
self.root = root
self.typ = typ
def generate(self, length=None):
"""
Allows traversal of a scale in a forward direction.
Example:
for note in scale.generate(length=2):
print note
"""
assert length is not None
typ = SCALE_ALIASES.get(self.typ, self.typ)
scale_data = SCALE_TYPES[typ][:]
octave_shift = 0
index = 0
while (length is None or length > 0):
if index == len(scale_data):
index = 0
octave_shift = octave_shift + 1
result = self.root.transpose(degrees=scale_data[index], octaves=octave_shift)
yield(result)
index = index + 1
if length is not None:
length = length - 1
def __eq__(self, other):
"""
Scales are equal if they are the ... same scale
"""
if other is None:
return False
return self.root == other.root and self.typ == other.typ
def short_name(self):
return "%s %s" % (self.root.short_name(), self.typ)
def __repr__(self):
return "Scale<%s>" % self.short_name()
def scale(input):
"""
Shortcut: scale(['C major') -> Scale object
"""
(root, typ) = input.split()
return Scale(root=note(root), typ=typ)
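
# Illustrative usage:
#   s = scale('C4 major')
#   notes = list(s.generate(length=8))  # one octave of C major starting at C4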
| apache-2.0 | 5,020,501,145,689,694,000 | 29.766355 | 89 | 0.536148 | false | 3.171484 | false | false | false |
ingkebil/trost | scripts/process_xls.py | 1 | 1832 | #!/usr/bin/python
# -*- coding: utf8 -*-
import os
import sys
import math
import xlrd
import data_objects as DO
import cast
""" Excel cell type decides which cast function to use. """
CAST_FUNC = {xlrd.XL_CELL_EMPTY: str,
xlrd.XL_CELL_TEXT: cast.cast_str,
xlrd.XL_CELL_NUMBER: float,
xlrd.XL_CELL_DATE: cast.cast_str,
xlrd.XL_CELL_BOOLEAN: int,
xlrd.XL_CELL_ERROR: int,
xlrd.XL_CELL_BLANK: cast.cast_str}
""" Parcelle information is stored on sheet 3, at least for Golm.xls. """
DEFAULT_PARCELLE_INDEX = 2
""" Treatment/Aliquot relations are stored on sheet 1. """
DEFAULT_TREATMENT_ALIQUOT_INDEX = 0
#
def read_xls_data(fn, sheet_index=0, include_time=False):
data = []
book = xlrd.open_workbook(fn)
sheet = book.sheet_by_index(sheet_index)
col_headers = [str(cell.value.encode('utf8')).replace(' ', '_')
for cell in sheet.row(0)]
for i in xrange(1, sheet.nrows):
row = []
for cell in sheet.row(i):
if cell.ctype == xlrd.XL_CELL_DATE:
# print 'DATE', cell.value
# print xlrd.xldate_as_tuple(cell.value, book.datemode)
cell_date = xlrd.xldate_as_tuple(cell.value, book.datemode)
if not include_time:
row.append('%4i-%02i-%02i ' % cell_date[:3])
else:
row.append('%4i-%02i-%02i %02i:%02i:%02i' % (cell_date[:3] + cell_date[-3:]))
else:
row.append(CAST_FUNC[cell.ctype](cell.value))
# row = [CAST_FUNC[cell.ctype](cell.value) for cell in sheet.row(i)]
data.append(DO.DataObject(col_headers, row))
# print data[-1].__dict__
return data, col_headers
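
def main(argv):
    # The original entry point was not included in this extract; this is a
    # minimal sketch that only exercises read_xls_data. The CLI shape
    # (file name plus optional sheet index) is an assumption.
    if not argv:
        print 'usage: process_xls.py <workbook.xls> [sheet_index]'
        return 1
    sheet_index = int(argv[1]) if len(argv) > 1 else 0
    data, headers = read_xls_data(argv[0], sheet_index=sheet_index)
    print 'read %i rows; columns: %s' % (len(data), ', '.join(headers))
    return 0
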
if __name__ == '__main__': main(sys.argv[1:])
| gpl-2.0 | -938,439,755,346,230,800 | 30.050847 | 97 | 0.566048 | false | 3.214035 | false | false | false |
induane/stomp.py3 | stomp/test/threading_test.py | 1 | 3873 | try:
from queue import Queue, Empty, Full
except ImportError:
from Queue import Queue, Empty, Full
import threading
import sys
import time
import unittest
import stomp
from testutils import *
class MQ(object):
def __init__(self):
self.connection = stomp.Connection(get_standard_host(), 'admin', 'password')
self.connection.set_listener('', None)
self.connection.start()
self.connection.connect(wait=True)
def send(self, topic, msg, persistent='true', retry=False):
self.connection.send(destination="/topic/%s" % topic, message=msg,
persistent=persistent)
mq = MQ()
class TestThreading(unittest.TestCase):
def setUp(self):
"""Test that mq sends don't wedge their threads.
Starts a number of sender threads, and runs for a set amount of
time. Each thread sends messages as fast as it can, and after each
send, pops from a Queue. Meanwhile, the Queue is filled with one
marker per second. If the Queue fills, the test fails, as that
indicates that all threads are no longer emptying the queue, and thus
must be wedged in their send() calls.
"""
self.Q = Queue(10)
self.Cmd = Queue()
self.Error = Queue()
self.clients = 20
self.threads = []
self.runfor = 20
for i in range(0, self.clients):
t = threading.Thread(name="client %s" % i,
target=self.make_sender(i))
t.setDaemon(1)
self.threads.append(t)
def tearDown(self):
for t in self.threads:
            if not t.isAlive():  # isAlive is a method; without parentheses this check was always False
print("thread", t, "died")
self.Cmd.put('stop')
for t in self.threads:
t.join()
print()
print()
errs = []
while 1:
try:
errs.append(self.Error.get(block=False))
except Empty:
break
print("Dead threads:", len(errs), "of", self.clients)
etype = {}
for ec, ev, tb in errs:
if ec in etype:
etype[ec] = etype[ec] + 1
else:
etype[ec] = 1
for k in sorted(etype.keys()):
print("%s: %s" % (k, etype[k]))
mq.connection.disconnect()
def make_sender(self, i):
Q = self.Q
Cmd = self.Cmd
Error = self.Error
def send(i=i, Q=Q, Cmd=Cmd, Error=Error):
counter = 0
print("%s starting" % i)
try:
while 1:
# print "%s sending %s" % (i, counter)
try:
mq.send('testclientwedge',
'Message %s:%s' % (i, counter))
except:
Error.put(sys.exc_info())
# thread will die
raise
else:
# print "%s sent %s" % (i, counter)
try:
Q.get(block=False)
except Empty:
pass
try:
if Cmd.get(block=False):
break
except Empty:
pass
counter +=1
finally:
print("final", i, counter)
return send
def test_threads_dont_wedge(self):
for t in self.threads:
t.start()
start = time.time()
while time.time() - start < self.runfor:
try:
self.Q.put(1, False)
time.sleep(1.0)
except Full:
assert False, "Failed: 'request' queue filled up"
print("passed")
| apache-2.0 | 8,511,249,930,972,715,000 | 30.745902 | 84 | 0.46992 | false | 4.416192 | true | false | false |
Dziolas/invenio | modules/bibformat/lib/elements/bfe_keywords.py | 1 | 2158 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints keywords
"""
__revision__ = "$Id$"
import cgi
from urllib import quote
from invenio.config import CFG_BASE_URL
def format_element(bfo, keyword_prefix, keyword_suffix, separator=' ; ', link='yes'):
"""
Display keywords of the record.
@param keyword_prefix: a prefix before each keyword
@param keyword_suffix: a suffix after each keyword
@param separator: a separator between keywords
@param link: links the keywords if 'yes' (HTML links)
"""
keywords = bfo.fields('6531_a')
if len(keywords) > 0:
if link == 'yes':
keywords = ['<a href="' + CFG_BASE_URL + '/search?f=keyword&p='+ \
quote('"' + keyword + '"') + \
'&ln='+ bfo.lang+ \
#'">' + cgi.escape(keyword) + '</a>'
'">' + keyword + '</a>'
for keyword in keywords]
#else:
# keywords = [cgi.escape(keyword)
# for keyword in keywords]
keywords = [keyword_prefix + keyword + keyword_suffix
for keyword in keywords]
return separator.join(keywords)
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
| gpl-2.0 | 7,402,563,837,806,346,000 | 34.966667 | 85 | 0.616775 | false | 4.15 | false | false | false |
oudalab/phyllo | phyllo/extractors/frodebertusDB.py | 1 | 2443 | import sqlite3
import urllib
from urllib.request import urlopen
from bs4 import BeautifulSoup, NavigableString
import nltk
nltk.download('punkt')
from nltk import sent_tokenize
def parseRes2(soup, title, url, cur, author, date, collectiontitle):
chapter = '-'
sen = ""
num = 1
[e.extract() for e in soup.find_all('br')]
[e.extract() for e in soup.find_all('table')]
[e.extract() for e in soup.find_all('font')]
getp = soup.find_all('p')
#print(getp)
i = 0
for p in getp:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
                                         'internal_navigation']:  # these are not part of the main text
continue
except:
pass
if p.b:
chapter = p.b.text
chapter = chapter.strip()
if chapter[0].isdigit():
chapter = chapter[2:]
chapter = chapter.strip()
else:
sen = p.text
sen = sen.strip()
num = 0
if sen != '':
for s in sen.split('\n'):
sentn = s
num += 1
cur.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, collectiontitle, title, 'Latin', author, date, chapter,
num, sentn, url, 'prose'))
def main():
# get proper URLs
siteURL = 'http://www.thelatinlibrary.com'
biggsURL = 'http://www.thelatinlibrary.com/frodebertus.html'
biggsOPEN = urllib.request.urlopen(biggsURL)
biggsSOUP = BeautifulSoup(biggsOPEN, 'html5lib')
textsURL = []
title = 'Frodebertus & Importunus'
author = 'Frodebertus & Importunus'
collectiontitle = 'FRODEBERTUS AND IMPORTUNUS'
date = '-'
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE author = 'Frodebertus & Importunus'")
parseRes2(biggsSOUP, title, biggsURL, c, author, date, collectiontitle)
if __name__ == '__main__':
main()
| apache-2.0 | 8,485,862,798,001,893,000 | 31.573333 | 103 | 0.548506 | false | 3.787597 | false | false | false |
anrl/gini3 | frontend/src/gbuilder/UI/Edge.py | 1 | 4871 | """The graphical representation of connections of nodes"""
import math
from PyQt4 import QtCore, QtGui
from Core.Item import *
from Core.globals import options, mainWidgets, defaultOptions
class Edge(QtGui.QGraphicsLineItem, Item):
def __init__(self, startItem, endItem, parent=None, scene=None):
"""
Create an edge between two nodes, linking them together graphically.
"""
QtGui.QGraphicsLineItem.__init__(self, parent, scene)
self.source = startItem
self.dest = endItem
self.sourcePoint = QtCore.QPointF()
self.destPoint = QtCore.QPointF()
self.source.addEdge(self)
self.dest.addEdge(self)
self.properties = {}
self.setProperty("id", "SomeEdge")
self.interfaces = []
self.setPen(QtGui.QPen(QtCore.Qt.black, 2, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin))
self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, True)
self.adjust()
def boundingRect(self):
"""
Get the bounding rectangle of the edge.
"""
extra = (self.pen().width() + 20) / 2.0
p1 = self.line().p1()
p2 = self.line().p2()
return QtCore.QRectF(p1, QtCore.QSizeF(p2.x() - p1.x(), p2.y() - p1.y())).normalized().adjusted(-extra, -extra, extra, extra)
def sourceNode(self):
"""
Get the source node.
"""
return self.source
def setSourceNode(self, node):
"""
Set the source node.
"""
self.source = node
self.adjust()
def destNode(self):
"""
Get the destination node.
"""
return self.dest
def setDestNode(self, node):
"""
Set the destination node.
"""
self.dest = node
self.adjust()
def shape(self):
"""
Get the shape of the edge.
"""
return QtGui.QGraphicsLineItem.shape(self)
def adjust(self):
"""
Adjust length and angle of edge based on movement of nodes.
"""
if not self.source or not self.dest:
return
line = QtCore.QLineF(self.mapFromItem(self.source, 0, 0), self.mapFromItem(self.dest, 0, 0))
self.setLine(line)
length = line.length()
if length == 0.0:
return
edgeOffset = QtCore.QPointF((line.dx() * 20) / length, (line.dy() * 20) / length)
self.prepareGeometryChange()
self.sourcePoint = line.p1() + edgeOffset
self.destPoint = line.p2() - edgeOffset
def paint(self, painter, option, widget=None):
"""
Draw the representation.
"""
if (self.source.collidesWithItem(self.dest)):
return
painter.setRenderHint(QtGui.QPainter.Antialiasing, options["smoothing"])
if self.device_type == "Wireless_Connection":
pen = QtGui.QPen()
pen.setDashPattern([10,10])
painter.setPen(pen)
painter.drawLine(self.line())
if self.isSelected():
painter.setPen(QtGui.QPen(QtCore.Qt.black, 1, QtCore.Qt.DashLine))
baseLine = QtCore.QLineF(0,0,1,0)
myLine = QtCore.QLineF(self.line())
angle = math.radians(myLine.angle(baseLine))
myLine.translate(4.0 * math.sin(angle), 4.0 * math.cos(angle))
painter.drawLine(myLine)
myLine.translate(-8.0 * math.sin(angle), -8.0 * math.cos(angle))
painter.drawLine(myLine)
def delete(self):
"""
Delete the edge and remove it from its nodes.
"""
if mainWidgets["main"].isRunning():
mainWidgets["log"].append("You cannot delete items from a running topology!")
return
from Tutorial import Tutorial
if isinstance(mainWidgets["canvas"], Tutorial):
mainWidgets["log"].append("You cannot delete items from the tutorial!")
return
self.source.removeEdge(self)
self.dest.removeEdge(self)
self.scene().removeItem(self)
def contextMenu(self, pos):
"""
Pop up the context menu on right click.
"""
self.menu = QtGui.QMenu()
self.menu.setPalette(defaultOptions["palette"])
self.menu.addAction("Delete", self.delete)
self.menu.exec_(pos)
def toString(self):
"""
Return a string representation of the graphical edge.
"""
graphical = "edge:(" + self.source.getName() + "," + self.dest.getName() + ")\n"
logical = ""
for prop, value in self.properties.iteritems():
logical += "\t" + prop + ":" + value + "\n"
return graphical + logical
| mit | 1,296,057,378,893,043,700 | 29.836601 | 133 | 0.554917 | false | 4.018977 | false | false | false |
seanjh/CanadianInsiderTransactions | __main__.py | 1 | 1156 | """Canadian Insider Transactions.
Usage:
sedi_transactions <issuer_num>...
Options:
-h --help Show this screen.
--version Show version.
"""
import os
from docopt import docopt
from sedi_transactions.transactions import SEDIView
OUTPUT_PATH = os.path.abspath(
os.path.join(os.path.abspath(__file__), '..', 'output')
)
if not os.path.exists(OUTPUT_PATH):
os.mkdir(OUTPUT_PATH)
def write_html(html_text, encoding, filename):
with open(filename, 'w', encoding=encoding) as outfile:
outfile.write(html_text)
def main():
arguments = docopt(__doc__, version='Canadian Insider Transactions 0.1')
sedar_issuers = arguments.get('<issuer_num>')
with SEDIView() as sv:
i = 0
while i < len(sedar_issuers):
html = sv.get_transactions_view(sedar_issuers[i])
filename = os.path.join(OUTPUT_PATH,
('{0}.html').format(sedar_issuers[i]))
if html:
print("Downloading HTML to {0}".format(filename))
write_html(html, sv.encoding, filename)
i += 1
if __name__ == '__main__':
main() | mit | 6,034,782,397,232,671,000 | 25.906977 | 76 | 0.595156 | false | 3.492447 | false | false | false |
eyp-developers/statistics | statistics/migrations/0044_topic_overview_link.py | 1 | 2256 | # Generated by Django 2.0.1 on 2018-07-25 07:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('statistics', '0043_merge_20180207_1821'),
]
operations = [
migrations.AddField(
model_name='session',
name='topic_overview_link',
field=models.URLField(blank=True),
),
migrations.AlterField(
model_name='historictopicplace',
name='historic_country',
field=models.CharField(blank=True, choices=[('AL', 'Albania'), ('AM', 'Armenia'), ('AT', 'Austria'), ('AZ', 'Azerbaijan'), ('BY', 'Belarus'), ('BE', 'Belgium'), ('BA', 'Bosnia and Herzegovina'), ('HR', 'Croatia'), ('CY', 'Cyprus'), ('CZ', 'Czech Republic'), ('DK', 'Denmark'), ('EE', 'Estonia'), ('FI', 'Finland'), ('FR', 'France'), ('GE', 'Georgia'), ('DE', 'Germany'), ('GR', 'Greece'), ('HU', 'Hungary'), ('IE', 'Ireland'), ('IT', 'Italy'), ('XK', 'Kosovo'), ('LV', 'Latvia'), ('LT', 'Lithuania'), ('LU', 'Luxembourg'), ('NL', 'The Netherlands'), ('NO', 'Norway'), ('PL', 'Poland'), ('PT', 'Portugal'), ('RO', 'Romania'), ('RU', 'Russia'), ('RS', 'Serbia'), ('SI', 'Slovenia'), ('SK', 'Slovakia'), ('ES', 'Spain'), ('SE', 'Sweden'), ('CH', 'Switzerland'), ('TR', 'Turkey'), ('UA', 'Ukraine'), ('GB', 'The United Kingdom')], max_length=2, null=True),
),
migrations.AlterField(
model_name='historictopicplace',
name='historic_session_type',
field=models.CharField(blank=True, choices=[('IS', 'International Session'), ('IF', 'International Forum'), ('NS', 'National Session'), ('RS', 'Regional Session'), ('SS', 'Small Scale Session'), ('OE', 'Other Event')], max_length=3, null=True),
),
migrations.AlterField(
model_name='topic',
name='difficulty',
field=models.CharField(blank=True, choices=[('E', 'Easy'), ('I', 'Intermediate'), ('H', 'Hard')], max_length=1, null=True),
),
migrations.AlterField(
model_name='topic',
name='type',
field=models.CharField(blank=True, choices=[('CR', 'Creative'), ('CF', 'Conflict'), ('ST', 'Strategy')], max_length=2, null=True),
),
]
| gpl-3.0 | -440,385,167,275,794,300 | 58.368421 | 864 | 0.542996 | false | 3.241379 | false | false | false |
f-prettyland/angr | angr/state_plugins/fast_memory.py | 1 | 8768 | import logging
import claripy
from ..storage.memory import SimMemory
from ..errors import SimFastMemoryError
l = logging.getLogger("angr.state_plugins.fast_memory")
l.setLevel(logging.DEBUG)
class SimFastMemory(SimMemory):
def __init__(self, memory_backer=None, memory_id=None, endness=None, contents=None, width=None, uninitialized_read_handler=None):
SimMemory.__init__(self, endness=endness)
self._contents = { } if contents is None else contents
self.width = width
self._uninitialized_read_handler = uninitialized_read_handler
self.id = memory_id
self._backer = memory_backer
if self._backer is not None:
raise SimFastMemoryError("TODO: support memory backers in SimFastMemory")
# TODO: support backers
#def _get_from_backer(self, missing_addr, size):
# for addr, backer in self._memory_backer.cbackers:
# start_backer = missing_addr - addr
# if start_backer < 0 and abs(start_backer) >= self._page_size: continue
# if start_backer >= len(backer): continue
# snip_start = max(0, start_backer)
# write_start = max(missing_addr, addr + snip_start)
# write_size = self._page_size - write_start%self._page_size
# snip = _ffi.buffer(backer)[snip_start:snip_start+write_size]
# mo = SimMemoryObject(claripy.BVV(snip), write_start)
# self._apply_object_to_page(n*self._page_size, mo, page=new_page)
def set_state(self, state):
super(SimFastMemory, self).set_state(state)
if self.width is None:
self.width = self.state.arch.bytes
def _handle_uninitialized_read(self, addr, inspect=True, events=True):
"""
The default uninitialized read handler. Returns symbolic bytes.
"""
if self._uninitialized_read_handler is None:
v = self.state.se.BVS("%s_%s" % (self.id, addr), self.width*self.state.arch.byte_width, key=self.variable_key_prefix + (addr,), inspect=inspect, events=events)
return v.reversed if self.endness == "Iend_LE" else v
else:
return self._uninitialized_read_handler(self, addr, inspect=inspect, events=events)
def _translate_addr(self, a): #pylint:disable=no-self-use
"""
Resolves this address.
"""
if isinstance(a, claripy.ast.Base) and not a.singlevalued:
raise SimFastMemoryError("address not supported")
return self.state.se.eval(a)
def _translate_data(self, d): #pylint:disable=no-self-use
"""
        Checks whether this data can be supported by FastMemory.
"""
return d
def _translate_size(self, s): #pylint:disable=no-self-use
"""
        Checks whether this size can be supported by FastMemory.
"""
if isinstance(s, claripy.ast.Base) and not s.singlevalued:
raise SimFastMemoryError("size not supported")
if s is None:
return s
return self.state.se.eval(s)
def _translate_cond(self, c): #pylint:disable=no-self-use
"""
        Checks whether this condition can be supported by FastMemory.
"""
if isinstance(c, claripy.ast.Base) and not c.singlevalued:
raise SimFastMemoryError("size not supported")
if c is None:
return True
else:
return self.state.se.eval_upto(c, 1)[0]
def _resolve_access(self, addr, size):
"""
Resolves a memory access of a certain size. Returns a sequence of the bases, offsets, and sizes of the accesses required
to fulfil this.
"""
# if we fit in one word
first_offset = addr % self.width
first_base = addr - first_offset
if first_offset + size <= self.width:
return [ (first_base, first_offset, size) ]
last_size = (addr + size) % self.width
last_base = addr + size - last_size
accesses = [ ]
accesses.append((first_base, first_offset, self.width - first_offset))
accesses.extend((a, 0, self.width) for a in range(first_base+self.width, last_base, self.width))
if last_size != 0:
accesses.append((last_base, 0, last_size))
return accesses
def _single_load(self, addr, offset, size, inspect=True, events=True):
"""
Performs a single load.
"""
try:
d = self._contents[addr]
except KeyError:
d = self._handle_uninitialized_read(addr, inspect=inspect, events=events)
self._contents[addr] = d
if offset == 0 and size == self.width:
return d
else:
return d.get_bytes(offset, size)
def _single_store(self, addr, offset, size, data):
"""
Performs a single store.
"""
if offset == 0 and size == self.width:
self._contents[addr] = data
elif offset == 0:
cur = self._single_load(addr, size, self.width - size)
self._contents[addr] = data.concat(cur)
elif offset + size == self.width:
cur = self._single_load(addr, 0, offset)
self._contents[addr] = cur.concat(data)
else:
cur = self._single_load(addr, 0, self.width)
start = cur.get_bytes(0, offset)
end = cur.get_bytes(offset+size, self.width-offset-size)
self._contents[addr] = start.concat(data, end)
def _store(self, req):
data = self._translate_data(req.data) if self._translate_cond(req.condition) else self._translate_data(req.fallback)
if data is None:
l.debug("Received false condition. Returning.")
req.completed = False
req.actual_addresses = [ req.addr ]
return
if req.endness == "Iend_LE" or (req.endness is None and self.endness == "Iend_LE"):
data = data.reversed
addr = self._translate_addr(req.addr)
size = self._translate_addr(req.size) if req.size is not None else data.length/self.state.arch.byte_width
#
# simplify
#
if (self.category == 'mem' and options.SIMPLIFY_MEMORY_WRITES in self.state.options) or \
(self.category == 'reg' and options.SIMPLIFY_REGISTER_WRITES in self.state.options):
data = self.state.se.simplify(data)
accesses = self._resolve_access(addr, size)
if len(accesses) == 1:
# simple case
a,o,s = accesses[0]
self._single_store(a, o, s, data)
else:
cur_offset = 0
for a,o,s in accesses:
portion = data.get_bytes(cur_offset, s)
cur_offset += s
self._single_store(a, o, s, portion)
# fill out the request
req.completed = True
req.actual_addresses = [ req.addr ]
req.stored_values = [ data ]
return req
def _load(self, addr, size, condition=None, fallback=None,
inspect=True, events=True, ret_on_segv=False):
if not self._translate_cond(condition):
l.debug("Received false condition. Returning fallback.")
return fallback
addr = self._translate_addr(addr)
size = self._translate_addr(size)
accesses = self._resolve_access(addr, size)
if len(accesses) == 1:
a,o,s = accesses[0]
return [addr], self._single_load(a, o, s, inspect=inspect, events=events), []
else:
return [addr], claripy.Concat(*[self._single_load(a, o, s) for a,o,s in accesses]), []
def _find(self, addr, what, max_search=None, max_symbolic_bytes=None, default=None, step=1):
raise SimFastMemoryError("find unsupported")
def _copy_contents(self, dst, src, size, condition=None, src_memory=None, dst_memory=None):
raise SimFastMemoryError("copy unsupported")
def copy(self):
return SimFastMemory(
endness=self.endness,
contents=dict(self._contents),
width=self.width,
uninitialized_read_handler=self._uninitialized_read_handler,
memory_id=self.id
)
def changed_bytes(self, other):
"""
Gets the set of changed bytes between self and other.
"""
changes = set()
l.warning("FastMemory.changed_bytes(): This implementation is very slow and only for debug purposes.")
for addr,v in self._contents.iteritems():
for i in range(self.width):
other_byte = other.load(addr+i, 1)
our_byte = v.get_byte(i)
if other_byte is our_byte:
changes.add(addr+i)
return changes
from .. import sim_options as options
| bsd-2-clause | -5,351,947,909,110,622,000 | 37.28821 | 171 | 0.591811 | false | 3.740614 | false | false | false |
JohnLunzer/flexx | flexx/event/__init__.py | 1 | 11458 | """
The event module provides a simple system for properties and events,
to let different components of an application react to each-other and
to user input.
In short:
* The :class:`HasEvents <flexx.event.HasEvents>` class provides objects
that have properties and can emit events.
* There are three decorators to create :func:`properties <flexx.event.prop>`,
:func:`readonlies <flexx.event.readonly>` and
:func:`emitters <flexx.event.emitter>`.
* There is a decorator to :func:`connect <flexx.event.connect>` a method
to an event.
Event
-----
An event is something that has occurred at a certain moment in time,
such as the mouse being pressed down or a property changing its value.
In this framework events are represented with dictionary objects that
provide information about the event (such as what button was pressed,
or the old and new value of a property). A custom :class:`Dict <flexx.event.Dict>`
class is used that inherits from ``dict`` but allows attribute access,
e.g. ``ev.button`` as an alternative to ``ev['button']``.
The HasEvents class
-------------------
The :class:`HasEvents <flexx.event.HasEvents>` class provides a base
class for objects that have properties and/or emit events. E.g. a
``flexx.ui.Widget`` inherits from ``flexx.app.Model``, which inherits
from ``flexx.event.HasEvents``.
Events are emitted using the :func:`emit() <flexx.event.HasEvents.emit>`
method, which accepts a name for the type of the event, and optionally a dict,
e.g. ``emitter.emit('mouse_down', dict(button=1, x=103, y=211))``.
The HasEvents object will add two attributes to the event: ``source``,
a reference to the HasEvents object itself, and ``type``, a string
indicating the type of the event.
As a user, you generally do not need to emit events explicitly; events are
automatically emitted, e.g. when setting a property.
Handler
-------
A handler is an object that can handle events. Handlers can be created
using the :func:`connect <flexx.event.connect>` decorator:
.. code-block:: python
from flexx import event
class MyObject(event.HasEvents):
@event.connect('foo')
def handle_foo(self, *events):
print(events)
ob = MyObject()
ob.emit('foo', dict(value=42)) # will invoke handle_foo()
This example demonstrates a few concepts. Firstly, the handler is
connected via a *connection-string* that specifies the type of the
event; in this case the handler is connected to the event-type "foo"
of the object. This connection-string can also be a path, e.g.
"sub.subsub.event_type". This allows for some powerful mechanics, as
discussed in the section on dynamism.
One can also see that the handler function accepts ``*events`` argument.
This is because handlers can be passed zero or more events. If a handler
is called manually (e.g. ``ob.handle_foo()``) it will have zero events.
When called by the event system, it will have at least 1 event. When
e.g. a property is set twice, the handler function is called
just once, with multiple events, in the next event loop iteration. It
is up to the programmer to determine whether only one action is
required, or whether all events need processing. In the latter case,
just use ``for ev in events: ...``.
In most cases, you will connect to events that are known beforehand,
like those they correspond to properties, readonlies and emitters.
If you connect to an event that is not known (as in the example above)
it might be a typo and Flexx will display a warning. Use `'!foo'` as a
connection string (i.e. prepend an exclamation mark) to suppress such
warnings.
Another useful feature of the event system is that a handler can connect to
multiple events at once:
.. code-block:: python
class MyObject(event.HasEvents):
@event.connect('foo', 'bar')
def handle_foo_and_bar(self, *events):
print(events)
To create a handler from a normal function, use the
:func:`HasEvents.connect() <flexx.event.HasEvents.connect>` method:
.. code-block:: python
h = event.HasEvents()
# Using a decorator
@h.connect('foo', 'bar')
def handle_func1(self, *events):
print(events)
# Explicit notation
def handle_func2(self, *events):
print(events)
h.connect(handle_func2, 'foo', 'bar')
Event emitters
--------------
Apart from using :func:`emit() <flexx.event.HasEvents.emit>` there are
certain attributes of ``HasEvents`` instances that generate events.
Properties
==========
Settable properties can be created easily using the
:func:`prop <flexx.event.prop>` decorator:
.. code-block:: python
class MyObject(event.HasEvents):
@event.prop
def foo(self, v=0):
''' This is a float indicating bla bla ...
'''
return float(v)
The function that is decorated is essentially the setter function, and
should have one argument (the new value for the property), which can
have a default value (representing the initial value). The function
body is used to validate and normalize the provided input. In this case
the input is simply cast to a float. The docstring of the function will
be the docstring of the property (e.g. for Sphinx docs).
An alternative initial value for a property can be provided upon instantiation:
.. code-block:: python
m = MyObject(foo=3)
Readonly
========
Readonly properties are created with the
:func:`readonly <flexx.event.readonly>` decorator. The value of a
readonly property can be set internally using the
:func:`_set_prop() <flexx.event.HasEvents._set_prop>` method:.
.. code-block:: python
class MyObject(event.HasEvents):
@event.readonly
def foo(self, v=0):
''' This is a float indicating bla.
'''
return float(v)
def _somewhere(self):
self._set_prop('foo', 42)
Emitter
=======
Emitter attributes make it easy to generate events, and function as a
placeholder to document events on a class. They are created with the
:func:`emitter <flexx.event.emitter>` decorator.
.. code-block:: python
class MyObject(event.HasEvents):
@event.emitter
def mouse_down(self, js_event):
''' Event emitted when the mouse is pressed down.
'''
return dict(button=js_event.button)
Emitters can have any number of arguments and should return a dictionary,
which will get emitted as an event, with the event type matching the name
of the emitter.
Labels
------
Labels are a feature that makes it possible to influence the order in
which event handlers are called, and provide a means to disconnect
specific (groups of) handlers. The label is part of the connection
string: 'foo.bar:label'.
.. code-block:: python
class MyObject(event.HasEvents):
@event.connect('foo')
    def given_foo_handler(self, *events):
...
@event.connect('foo:aa')
    def my_foo_handler(self, *events):
# This one is called first: 'aa' < 'given_f...'
...
When an event is emitted, the event is added to the pending events of
the handlers in the order of a key, which is the label if present, and
otherwise the name of the handler. Note that this does not guarantee
the order in case a handler has multiple connections: a handler can be
scheduled to handle its events due to another event, and a handler
always handles all its pending events at once.
The label can also be used in the
:func:`disconnect() <flexx.event.HasEvents.disconnect>` method:
.. code-block:: python
@h.connect('foo:mylabel')
def handle_foo(*events):
...
...
h.disconnect('foo:mylabel') # don't need reference to handle_foo
Dynamism
--------
Dynamism is a concept that allows one to connect to events for which
the source can change. For the following example, assume that ``Node``
is a ``HasEvents`` subclass that has properties ``parent`` and
``children``.
.. code-block:: python
main = Node()
main.parent = Node()
main.children = Node(), Node()
@main.connect('parent.foo')
def parent_foo_handler(*events):
...
@main.connect('children*.foo')
def children_foo_handler(*events):
...
The ``parent_foo_handler`` gets invoked when the "foo" event gets
emitted on the parent of main. Similarly, the ``children_foo_handler``
gets invoked when any of the children emits its "foo" event. Note that
in some cases you might also want to connect to changes of the ``parent``
or ``children`` property itself.
The event system automatically reconnects handlers when necessary. This
concept makes it very easy to connect to the right events without the
need for a lot of boilerplate code.
Note that the above example would also work if ``parent`` would be a
regular attribute instead of a property, but the handler would not be
automatically reconnected when it changed.
Patterns
--------
This event system is quite flexible and designed to cover the needs
of a variety of event/messaging mechanisms. This section discusses
how this system relates to some common patterns, and how these can be
implemented.
Observer pattern
================
The idea of the observer pattern is that observers keep track of (the state
of) an object, and that object is agnostic about what is tracking it.
For example, in a music player, instead of writing code to update the
window-title inside the function that starts a song, there would be a
concept of a "current song", and the window would listen for changes to
the current song to update the title when it changes.
In ``flexx.event``, a ``HasEvents`` object keeps track of its observers
(handlers) and notifies them when there are changes. In our music player
example, there would be a property "current_song", and a handler to
take action when it changes.
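A minimal sketch of that example (``Player`` and ``set_title()`` are
illustrative names, not part of flexx):
.. code-block:: python
    class Player(event.HasEvents):
        @event.prop
        def current_song(self, v=''):
            ''' The title of the currently playing song. '''
            return str(v)
    player = Player()
    @player.connect('current_song')
    def update_title(*events):
        # set_title() is an illustrative stand-in for window-title code
        set_title('Playing: %s' % events[-1].new_value)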
As is common in the observer pattern, the handlers keep track of the
objects that they observe. Therefore both handlers and ``HasEvents``
objects have a ``dispose()`` method for cleaning up.
Signals and slots
=================
The Qt GUI toolkit makes use of a mechanism called "signals and slots" as
an easy way to connect different components of an application. In
``flexx.event`` signals translate to readonly properties, and slots to
the handlers that connect to them.
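A rough flexx equivalent (the ``Worker`` class and its members are
illustrative):
.. code-block:: python
    class Worker(event.HasEvents):
        @event.readonly
        def progress(self, v=0.0):
            ''' Signal-like: only set internally via _set_prop(). '''
            return float(v)
        def step(self):
            self._set_prop('progress', self.progress + 0.1)
    worker = Worker()
    @worker.connect('progress')
    def on_progress(*events):  # slot-like
        print('progress:', events[-1].new_value)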
Overloadable event handlers
===========================
In Qt, the "event system" consists of methods that handle an event, which
can be overloaded in subclasses to handle an event differently. In
``flexx.event``, handlers can similarly be re-implemented in subclasses,
and these can call the original handler using ``super()`` if needed.
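For example (a sketch of that idea):
.. code-block:: python
    class Base(event.HasEvents):
        @event.connect('foo')
        def handle_foo(self, *events):
            print('base handles foo')
    class Sub(Base):
        @event.connect('foo')
        def handle_foo(self, *events):
            super().handle_foo(*events)  # optionally invoke the original
            print('sub handles foo')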
Publish-subscribe pattern
==========================
In pub-sub, publishers generate messages identified by a 'topic', and
subscribers can subscribe to such topics. There can be zero or more publishers
and zero or more subscribers to any topic.
In ``flexx.event`` a `HasEvents` object can play the role of a broker.
Publishers can simply emit events. The event type represents the message
topic. Subscribers are represented by handlers.
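A minimal broker sketch (the topic name and ``payload`` key are illustrative):
.. code-block:: python
    broker = event.HasEvents()
    @broker.connect('!news')  # '!' suppresses the unknown-event warning
    def subscriber(*events):
        for ev in events:
            print('news:', ev.payload)
    broker.emit('news', dict(payload='something happened'))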
"""
import logging
logger = logging.getLogger(__name__)
del logging
# flake8: noqa
from ._dict import Dict
from ._loop import loop
from ._handler import Handler, connect
from ._emitters import prop, readonly, emitter
from ._hasevents import HasEvents
# from ._hasevents import new_type, with_metaclass
| bsd-2-clause | 7,249,233,369,221,251,000 | 32.211594 | 82 | 0.710159 | false | 3.972954 | false | false | false |
timothycrosley/thedom | thedom/document.py | 1 | 6794 | '''
Document.py
Provides elements that define the html document being served to the client-side
Copyright (C) 2015 Timothy Edmund Crosley
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
from . import Base, Factory
from .MethodUtils import CallBack
from .MultiplePythonSupport import *
from .Resources import ResourceFile
Factory = Factory.Factory("Document")
DOCTYPE_XHTML_TRANSITIONAL = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">')
DOCTYPE_XHTML_STRICT = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">')
DOCTYPE_XHTML_FRAMESET = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">')
DOCTYPE_HTML4_TRANSITIONAL = ('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" '
'"http://www.w3.org/TR/REC-html40/loose.dtd">')
DOCTYPE_HTML4_STRICT = ('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"'
'"http://www.w3.org/TR/html4/strict.dtd">')
DOCTYPE_HTML4_FRAMESET = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">')
DOCTYPE_HTML5 = "<!DOCTYPE html>"
class MetaData(Base.Node):
"""
A webelement implementation of the meta tag
"""
__slots__ = ()
tagName = "meta"
displayable = False
properties = Base.Node.properties.copy()
properties['value'] = {'action':'setValue'}
properties['name'] = {'action':'setName'}
properties['http-equiv'] = {'action':'attribute'}
def _create(self, id=None, name=None, parent=None, **kwargs):
Base.Node._create(self)
def value(self):
"""
        Returns the meta tag's value
"""
return self.attributes.get('content')
def setValue(self, value):
"""
        Sets the meta tag's value
"""
self.attributes['content'] = value
def getName(self):
"""
Returns the name of the meta tag
"""
return self.name
def setName(self, name):
"""
Sets the name of the meta tag
"""
self.name = name
def shown(self):
"""
Meta tags are never visible
"""
return False
Factory.addProduct(MetaData)
class HTTPHeader(MetaData):
"""
A webelement that represents an http header meta tag
"""
__slots__ = ()
def getName(self):
"""
        Returns the header's name
"""
return self.attributes.get('http-equiv')
def setName(self, name):
"""
        Sets the header's name
"""
self.attributes['http-equiv'] = name
Factory.addProduct(HTTPHeader)
class Document(Base.Node):
"""
A Node representation of the overall document that fills a single page
"""
__slots__ = ('head', 'body', 'title', 'contentType')
doctype = DOCTYPE_HTML5
tagName = "html"
properties = Base.Node.properties.copy()
properties['doctype'] = {'action':'classAttribute'}
properties['title'] = {'action':'title.setText'}
properties['contentType'] = {'action':'contentType.setValue'}
properties['xmlns'] = {'action':'attribute'}
class Head(Base.Node):
"""
Documents Head
"""
tagName = "head"
class Body(Base.Node):
"""
Documents Body
"""
tagName = "body"
class Title(Base.Node):
"""
Documents Title
"""
tagName = "title"
def _create(self, id=None, name=None, parent=None, **kwargs):
Base.Node._create(self, id=id, name=name, parent=parent)
self._textNode = self.add(Base.TextNode())
def setText(self, text):
"""
Sets the document title
"""
self._textNode.setText(text)
def text(self):
"""
Returns the document title
"""
            return self._textNode.text()
def _create(self, id=None, name=None, parent=None, **kwargs):
Base.Node._create(self)
self.head = self.add(self.Head())
self.body = self.add(self.Body())
self.title = self.head.add(self.Title())
self.contentType = self.addHeader('Content-Type', 'text/html; charset=UTF-8')
def addMetaData(self, name=None, value="", **kwargs):
"""
Will add a meta tag based on name+value pair
"""
metaTag = self.head.add(MetaData(**kwargs))
metaTag.setName(name)
metaTag.setValue(value)
return metaTag
def addHeader(self, name, value):
"""
Will add an HTTP header pair based on name + value pair
"""
header = self.head.add(HTTPHeader())
header.setName(name)
header.setValue(value)
return header
def toHTML(self, formatted=False, *args, **kwargs):
"""
Overrides toHTML to include the doctype definition before the open tag.
"""
return self.doctype + "\n" + Base.Node.toHTML(self, formatted, *args, **kwargs)
def add(self, childElement, ensureUnique=True):
"""
Overrides add to place header elements and resources in the head
and all others in the body.
"""
if type(childElement) in [self.Head, self.Body]:
return Base.Node.add(self, childElement, ensureUnique)
elif type(childElement) == ResourceFile or childElement._tagName in ['title', 'base', 'link',
'meta', 'script', 'style']:
return self.head.add(childElement, ensureUnique)
else:
return self.body.add(childElement, ensureUnique)
Head = Document.Head
Body = Document.Body
Title = Document.Title
Factory.addProduct(Document)
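# Example usage (sketch, based on the constructor and methods defined above):
#   doc = Document()
#   doc.title.setText('Hello')
#   doc.addMetaData('description', 'demo page')
#   html = doc.toHTML(formatted=True)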
| gpl-2.0 | 7,022,688,719,562,299,000 | 31.507177 | 104 | 0.584781 | false | 4.063397 | false | false | false |
jiyfeng/RSTParser | model.py | 1 | 3945 | ## model.py
## Author: Yangfeng Ji
## Date: 09-09-2014
## Time-stamp: <yangfeng 11/05/2014 20:44:25>
## Last changed: umashanthi 11/19/2014
""" As a parsing model, it includes the following functions
1, Mini-batch training on the data generated by the Data class
2, Shift-Reduce RST parsing for a given text sequence
3, Save/load parsing model
"""
from sklearn.svm import LinearSVC
from cPickle import load, dump
from parser import SRParser
from feature import FeatureGenerator
from tree import RSTTree
from util import *
from datastructure import ActionError
import gzip, sys
import numpy as np
class ParsingModel(object):
def __init__(self, vocab=None, idxlabelmap=None, clf=None):
""" Initialization
:type vocab: dict
        :param vocab: mapping from feature templates to feature indices
        :type idxlabelmap: dict
        :param idxlabelmap: mapping from parsing action indices to
parsing actions
:type clf: LinearSVC
:param clf: an multiclass classifier from sklearn
"""
self.vocab = vocab
# print labelmap
self.labelmap = idxlabelmap
        if clf is None:
            self.clf = LinearSVC()
        else:
            self.clf = clf
def train(self, trnM, trnL):
""" Perform batch-learning on parsing model
"""
self.clf.fit(trnM, trnL)
def predict(self, features):
""" Predict parsing actions for a given set
of features
:type features: list
:param features: feature list generated by
FeatureGenerator
"""
vec = vectorize(features, self.vocab)
predicted_output = self.clf.decision_function(vec)
idxs = np.argsort(predicted_output[0])[::-1]
possible_labels = []
for index in idxs:
possible_labels.append(self.labelmap[index])
return possible_labels
def savemodel(self, fname):
""" Save model and vocab
"""
if not fname.endswith('.gz'):
fname += '.gz'
D = {'clf':self.clf, 'vocab':self.vocab,
'idxlabelmap':self.labelmap}
with gzip.open(fname, 'w') as fout:
dump(D, fout)
print 'Save model into file: {}'.format(fname)
def loadmodel(self, fname):
""" Load model
"""
with gzip.open(fname, 'r') as fin:
D = load(fin)
self.clf = D['clf']
self.vocab = D['vocab']
self.labelmap = D['idxlabelmap']
print 'Load model from file: {}'.format(fname)
def sr_parse(self, texts):
""" Shift-reduce RST parsing based on model prediction
:type texts: list of string
:param texts: list of EDUs for parsing
"""
# Initialize parser
srparser = SRParser([],[])
srparser.init(texts)
# Parsing
while not srparser.endparsing():
# Generate features
stack, queue = srparser.getstatus()
# Make sure call the generator with
# same arguments as in data generation part
fg = FeatureGenerator(stack, queue)
features = fg.features()
labels = self.predict(features)
        # Enumerate through all possible actions ranked based on prediction scores
for i,label in enumerate(labels):
action = label2action(label)
try:
srparser.operate(action)
break # if legal action, end the loop
except ActionError:
                if i < len(labels) - 1: # if not the last candidate, try the next possible action
continue
else:
print "Parsing action error with {}".format(action)
sys.exit()
tree = srparser.getparsetree()
rst = RSTTree(tree=tree)
return rst
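# Example usage (sketch; the file name is illustrative):
#   model = ParsingModel()
#   model.loadmodel('parsing-model.gz')
#   tree = model.sr_parse(['First EDU text.', 'Second EDU text.'])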
| mit | -4,449,401,833,782,390,000 | 30.56 | 93 | 0.570089 | false | 4.388209 | false | false | false |
sherpaman/MolToolPy | bin/hbond_stat.py | 1 | 1064 | #!/usr/bin/env python
from sys import argv,stderr
#Takes as input the name of a file containing residue-pair data for each frame.
#Each line has the following format:
#frame atom1_id res1_name res1_id atom1_name atom2_id res2_name res2_id atom2_name ...........
#0     8661     T       273     N3      8577    T       271     O2P      0.287049        4.688220
#The output is a dictionary:
#diz[(res1,res2)] = frequency
def group_values(filename):
hbond={}
local={}
resname={}
prev_frame=-1
tot_frame=0
    f = open(filename)  # the original iterated over an undefined name 'f'
    for line in f:
flags=line.split()
frame=int(flags[0])
res1 =int(flags[3])
res2 =int(flags[7])
resname[res1]=flags[2]
resname[res2]=flags[6]
        if frame != prev_frame:
prev_frame=frame
tot_frame+=1
for k in local.keys():
try:
hbond[k]+=1
except KeyError:
hbond[k]=1
local={}
stderr.write("\rframe %d " %(frame))
        if res1<=res2:
            local[res1,res2]=1
        else:
            local[res2,res1]=1  # store pairs in a canonical (low, high) order
    stderr.write("\n")
    # flush the pairs seen in the final frame; the loop above only merges a
    # frame's pairs into hbond when the frame number changes
    for k in local.keys():
        try:
            hbond[k]+=1
        except KeyError:
            hbond[k]=1
    return hbond
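if __name__ == '__main__':
    # Minimal CLI sketch (assumed usage; the original file defines no entry
    # point): print each residue pair with its hydrogen-bond frequency.
    result = group_values(argv[1])
    for (r1, r2), freq in sorted(result.items()):
        print "%d %d %d" % (r1, r2, freq)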
| gpl-2.0 | -7,142,410,232,880,668,000 | 23.159091 | 102 | 0.575729 | false | 2.812169 | false | false | false |
sebp/scikit-survival | sksurv/preprocessing.py | 1 | 3945 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from .column import encode_categorical
__all__ = ['OneHotEncoder']
def check_columns_exist(actual, expected):
missing_features = expected.difference(actual)
if len(missing_features) != 0:
raise ValueError("%d features are missing from data: %s" % (
len(missing_features), missing_features.tolist()
))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical columns with `M` categories into `M-1` columns according
to the one-hot scheme.
The order of non-categorical columns is preserved, encoded columns are inserted
inplace of the original column.
Parameters
----------
allow_drop : boolean, optional, default: True
Whether to allow dropping categorical columns that only consist
of a single category.
Attributes
----------
feature_names_ : pandas.Index
List of encoded columns.
categories_ : dict
Categories of encoded columns.
encoded_columns_ : list
Name of columns after encoding.
Includes names of non-categorical columns.
"""
def __init__(self, allow_drop=True):
self.allow_drop = allow_drop
def fit(self, X, y=None): # pylint: disable=unused-argument
"""Retrieve categorical columns.
Parameters
----------
X : pandas.DataFrame
Data to encode.
y :
Ignored. For compatibility with Pipeline.
Returns
-------
self : object
Returns self
"""
self.fit_transform(X)
return self
def _encode(self, X, columns_to_encode):
return encode_categorical(X, columns=columns_to_encode, allow_drop=self.allow_drop)
def fit_transform(self, X, y=None, **fit_params): # pylint: disable=unused-argument
"""Convert categorical columns to numeric values.
Parameters
----------
X : pandas.DataFrame
Data to encode.
y :
Ignored. For compatibility with TransformerMixin.
fit_params :
Ignored. For compatibility with TransformerMixin.
Returns
-------
Xt : pandas.DataFrame
Encoded data.
"""
columns_to_encode = X.select_dtypes(include=["object", "category"]).columns
x_dummy = self._encode(X, columns_to_encode)
self.feature_names_ = columns_to_encode
self.categories_ = {k: X[k].cat.categories for k in columns_to_encode}
self.encoded_columns_ = x_dummy.columns
return x_dummy
def transform(self, X):
"""Convert categorical columns to numeric values.
Parameters
----------
X : pandas.DataFrame
Data to encode.
Returns
-------
Xt : pandas.DataFrame
Encoded data.
"""
check_is_fitted(self, "encoded_columns_")
check_columns_exist(X.columns, self.feature_names_)
Xt = X.copy()
for col, cat in self.categories_.items():
Xt[col].cat.set_categories(cat, inplace=True)
new_data = self._encode(Xt, self.feature_names_)
return new_data.loc[:, self.encoded_columns_]
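# Example usage (illustrative; assumes a pandas DataFrame with a categorical
# column):
#
#   import pandas as pd
#   df = pd.DataFrame({"size": pd.Categorical(["small", "large", "small"])})
#   enc = OneHotEncoder()
#   encoded = enc.fit_transform(df)  # M categories encode to M-1 columns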
| gpl-3.0 | -6,087,449,575,147,389,000 | 31.073171 | 91 | 0.628897 | false | 4.417693 | false | false | false |
jtbattle/wangemu | wvdutil/wvdHandler_base.py | 1 | 4196 | # Purpose: template class for file handler for the wvdutil.py program
# Author: Jim Battle
#
# Version: 1.0, 2018/09/15, JTB
# massive restructuring of the old wvdutil code base
# Version: 1.1, 2021/06/19, JTB
# get rid of bilingualism (aka python2 support);
# convert to inline type hints instead of type hint pragma comments
# Version: 1.2, 2021/06/20, JTB
# declare and use type aliases Sector and SectorList for clarity
from typing import List, Dict, Any, Tuple # pylint: disable=unused-import
from wvdTypes import Sector, SectorList, Options
class WvdHandler_base(object): # pylint: disable=useless-object-inheritance
def __init__(self):
self._errors: List[str] = []
self._warnings: List[str] = []
self._firsterr: int = 0 # which was the first sector with an error
self._firstwarn: int = 0 # which was the first sector with a warning
@staticmethod
def name() -> str:
return 'short description'
@staticmethod
def nameLong() -> str:
# optional: override with longer description if useful
return WvdHandler_base.name()
# return either "P "(rogram) or "D "(ata)
@staticmethod
def fileType() -> str:
return 'D '
# pylint: disable=unused-argument, no-self-use
def checkBlocks(self, blocks: SectorList, opts: Options) -> Dict[str, Any]:
# the opts dictionary can contain these keys:
# 'sector' = <number> -- the absolute address of the first sector
# 'used' = <number> -- the "used" field from the catalog, if it is known
# 'warnlimit' = <number> -- stop when the number of warnings is exceeded
# the return dict contains these keys:
# 'failed' = bool -- True if any errors or warnings
# 'errors' = [str] -- list of error messages
# 'warnings' = [str] -- list of warning messages
# 'lastsec' = <number> -- last valid sector before giving up
        return { 'failed': False, 'errors': [], 'warnings': [], 'lastsec': 0 }
# the bool is True if this is a terminating block
# pylint: disable=unused-argument, no-self-use
def listOneBlock(self, blk: Sector, opts: Options) -> Tuple[bool, List[str]]:
# the opts dictionary can contain these keys:
# 'sector' = <number> -- the absolute address of the first sector
# 'used' = <number> -- the "used" field from the catalog, if it is known
# 'warnlimit' = <number> -- stop when the number of warnings is exceeded
return (True, [])
# if the file type doesn't have context which crosses sectors, then
# the default method will just repeated use listOneBlock
def listBlocks(self, blocks: SectorList, opts: Options) -> List[str]:
# same opts as listOneBlock
listing = []
opt = dict(opts)
for offset, blk in enumerate(blocks):
opt['secnum'] = opts['sector'] + offset
done, morelines = self.listOneBlock(blk, opt)
listing.extend(morelines)
if done: break
return listing
# utilities to be used by derived classes
def clearErrors(self) -> None:
self._errors = []
self._warnings = []
self._firsterr = 0
self._firstwarn = 0
def error(self, secnum: int, text: str) -> None:
if (not self._errors) or (secnum < self._firsterr):
self._firsterr = secnum
self._errors.append(text)
def warning(self, secnum: int, text: str) -> None:
if (not self._warnings) or (secnum < self._firstwarn):
self._firstwarn = secnum
self._warnings.append(text)
def status(self, sec: int, opts: Options) -> Dict[str, Any]:
failed = (len(self._errors) > 0) or (len(self._warnings) > opts['warnlimit'])
if self._errors:
last_good_sector = self._firsterr-1
elif self._warnings:
last_good_sector = self._firstwarn-1
else:
last_good_sector = sec
return { 'failed': failed,
'errors': self._errors,
'warnings': self._warnings,
'lastsec': last_good_sector }
| mit | -7,556,717,784,087,207,000 | 39.346154 | 87 | 0.602717 | false | 3.793852 | false | false | false |
endlessm/chromium-browser | build/win/reorder-imports.py | 4 | 4054 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import optparse
import os
import shutil
import subprocess
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..',
'third_party', 'pefile'))
import pefile
def reorder_imports(input_dir, output_dir, architecture):
"""Swap chrome_elf.dll to be the first import of chrome.exe.
Also copy over any related files that might be needed
(pdbs, manifests etc.).
"""
# TODO(thakis): See if there is a reliable way to write the
# correct executable in the first place, so that this script
# only needs to verify that and not write a whole new exe.
input_image = os.path.join(input_dir, 'chrome.exe')
output_image = os.path.join(output_dir, 'chrome.exe')
# pefile mmap()s the whole executable, and then parses parts of
# it into python data structures for ease of processing.
# To write the file again, only the mmap'd data is written back,
# so modifying the parsed python objects generally has no effect.
# However, parsed raw data ends up in pe.Structure instances,
# and these all get serialized back when the file gets written.
# So things that are in a Structure must have their data set
# through the Structure, while other data must bet set through
# the set_bytes_*() methods.
pe = pefile.PE(input_image, fast_load=True)
if architecture == 'x64' or architecture == 'arm64':
assert pe.PE_TYPE == pefile.OPTIONAL_HEADER_MAGIC_PE_PLUS
else:
assert pe.PE_TYPE == pefile.OPTIONAL_HEADER_MAGIC_PE
pe.parse_data_directories(directories=[
pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT']])
found_elf = False
for i, peimport in enumerate(pe.DIRECTORY_ENTRY_IMPORT):
if peimport.dll.lower() == 'chrome_elf.dll':
assert not found_elf, 'only one chrome_elf.dll import expected'
found_elf = True
if i > 0:
swap = pe.DIRECTORY_ENTRY_IMPORT[0]
# Morally we want to swap peimport.struct and swap.struct here,
# but the pe module doesn't expose a public method on Structure
# to get all data of a Structure without explicitly listing all
# field names.
# NB: OriginalFirstThunk and Characteristics are an union both at
# offset 0, handling just one of them is enough.
peimport.struct.OriginalFirstThunk, swap.struct.OriginalFirstThunk = \
swap.struct.OriginalFirstThunk, peimport.struct.OriginalFirstThunk
peimport.struct.TimeDateStamp, swap.struct.TimeDateStamp = \
swap.struct.TimeDateStamp, peimport.struct.TimeDateStamp
peimport.struct.ForwarderChain, swap.struct.ForwarderChain = \
swap.struct.ForwarderChain, peimport.struct.ForwarderChain
peimport.struct.Name, swap.struct.Name = \
swap.struct.Name, peimport.struct.Name
peimport.struct.FirstThunk, swap.struct.FirstThunk = \
swap.struct.FirstThunk, peimport.struct.FirstThunk
assert found_elf, 'chrome_elf.dll import not found'
pe.write(filename=output_image)
for fname in glob.iglob(os.path.join(input_dir, 'chrome.exe.*')):
shutil.copy(fname, os.path.join(output_dir, os.path.basename(fname)))
return 0
def main(argv):
usage = 'reorder_imports.py -i <input_dir> -o <output_dir> -a <target_arch>'
parser = optparse.OptionParser(usage=usage)
parser.add_option('-i', '--input', help='reorder chrome.exe in DIR',
metavar='DIR')
parser.add_option('-o', '--output', help='write new chrome.exe to DIR',
metavar='DIR')
parser.add_option('-a', '--arch', help='architecture of build (optional)',
default='ia32')
opts, args = parser.parse_args()
if not opts.input or not opts.output:
parser.error('Please provide and input and output directory')
return reorder_imports(opts.input, opts.output, opts.arch)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | 5,867,402,285,721,175,000 | 41.229167 | 78 | 0.694869 | false | 3.695533 | false | false | false |
qedsoftware/commcare-hq | corehq/apps/callcenter/fixturegenerators.py | 1 | 4228 | from xml.etree import ElementTree
from datetime import datetime
import pytz
from corehq.apps.callcenter.app_parser import get_call_center_config_from_app
from casexml.apps.phone.models import OTARestoreUser
from corehq.util.soft_assert import soft_assert
from corehq.util.timezones.conversions import ServerTime
from dimagi.utils.logging import notify_exception
utc = pytz.utc
def should_sync(domain, last_sync, utcnow=None):
# definitely sync if we haven't synced before
if not last_sync or not last_sync.date:
return True
# utcnow only used in tests to mock other times
utcnow = utcnow or datetime.utcnow()
try:
timezone = domain.get_default_timezone()
except pytz.UnknownTimeZoneError:
timezone = utc
last_sync_utc = last_sync.date
# check if user has already synced today (in local timezone).
# Indicators only change daily.
last_sync_local = ServerTime(last_sync_utc).user_time(timezone).done()
current_date_local = ServerTime(utcnow).user_time(timezone).done()
if current_date_local.date() != last_sync_local.date():
return True
return False
class IndicatorsFixturesProvider(object):
id = 'indicators'
def __call__(self, restore_user, version, last_sync=None, app=None):
assert isinstance(restore_user, OTARestoreUser)
domain = restore_user.project
fixtures = []
if self._should_return_no_fixtures(domain, last_sync):
return fixtures
config = None
if app:
try:
config = get_call_center_config_from_app(app)
except:
notify_exception(None, "Error getting call center config from app", details={
'domain': app.domain,
'app_id': app.get_id
})
if config:
_assert = soft_assert(['skelly_at_dimagi_dot_com'.replace('_at_', '@').replace('_dot_', '.')])
_assert(not config.includes_legacy(), 'Domain still using legacy call center indicators', {
'domain': domain.name,
'config': config.to_json()
})
try:
fixtures.append(gen_fixture(restore_user, restore_user.get_call_center_indicators(config)))
except Exception: # blanket exception catching intended
notify_exception(None, 'problem generating callcenter fixture', details={
'user_id': restore_user.user_id,
'domain': restore_user.domain
})
return fixtures
@staticmethod
def _should_return_no_fixtures(domain, last_sync):
config = domain.call_center_config
return (
not domain or
not (config.fixtures_are_active() and config.config_is_valid()) or
not should_sync(domain, last_sync)
)
indicators_fixture_generator = IndicatorsFixturesProvider()
def gen_fixture(restore_user, indicator_set):
"""
Generate the fixture from the indicator data.
    :param restore_user: The restore user.
:param indicator_set: A subclass of SqlIndicatorSet
"""
"""
Example output:
indicator_set.name = 'demo'
indicator_set.get_data() = {'user_case1': {'indicator_a': 1, 'indicator_b': 2}}
<fixture id="indicators:demo" user_id="...">
<indicators>
<case id="user_case1">
<indicator_a>1</indicator_a>
<indicator_b>2</indicator_2>
</case>
</indicators>
</fixture>
"""
if indicator_set is None:
return []
name = indicator_set.name
data = indicator_set.get_data()
fixture = ElementTree.Element('fixture', attrib={
'id': ':'.join((IndicatorsFixturesProvider.id, name)),
'user_id': restore_user.user_id,
'date': indicator_set.reference_date.isoformat()
})
indicators_node = ElementTree.SubElement(fixture, 'indicators')
for case_id, indicators in data.iteritems():
group = ElementTree.SubElement(indicators_node, 'case', attrib={'id': case_id})
for name, value in indicators.items():
indicator = ElementTree.SubElement(group, name)
indicator.text = str(value)
return fixture
| bsd-3-clause | 1,972,465,104,134,683,000 | 31.274809 | 106 | 0.62228 | false | 3.996219 | true | false | false |
mikemhenry/arcade | examples/sprite_tiled_map.py | 1 | 6561 | """
Load a map stored in csv format, as exported by the program 'Tiled.'
Artwork from http://kenney.nl
"""
import arcade
SPRITE_SCALING = 0.5
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
# How many pixels to keep as a minimum margin between the character
# and the edge of the screen.
VIEWPORT_MARGIN = 40
RIGHT_MARGIN = 150
# Physics
MOVEMENT_SPEED = 5
JUMP_SPEED = 14
GRAVITY = 0.5
def get_map():
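    # map.csv is assumed to hold comma-separated integer tile codes, one row
    # per line, e.g. "-1,-1,0,2,3" (-1 = empty; 0-3 pick a tile image below).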
map_file = open("map.csv")
map_array = []
for line in map_file:
line = line.strip()
map_row = line.split(",")
for index, item in enumerate(map_row):
map_row[index] = int(item)
map_array.append(map_row)
return map_array
class MyApplication(arcade.Window):
""" Main application class. """
def __init__(self, width, height):
"""
Initializer
:param width:
:param height:
"""
super().__init__(width, height)
# Sprite lists
self.all_sprites_list = None
self.coin_list = None
# Set up the player
self.score = 0
self.player_sprite = None
self.wall_list = None
self.physics_engine = None
self.view_left = 0
self.view_bottom = 0
self.game_over = False
def setup(self):
""" Set up the game and initialize the variables. """
# Sprite lists
self.all_sprites_list = arcade.SpriteList()
self.wall_list = arcade.SpriteList()
# Set up the player
self.score = 0
self.player_sprite = arcade.Sprite("images/character.png",
SPRITE_SCALING)
self.player_sprite.center_x = 64
self.player_sprite.center_y = 270
self.all_sprites_list.append(self.player_sprite)
map_array = get_map()
for row_index, row in enumerate(map_array):
for column_index, item in enumerate(row):
if item == -1:
continue
elif item == 0:
wall = arcade.Sprite("images/boxCrate_double.png",
SPRITE_SCALING)
elif item == 1:
wall = arcade.Sprite("images/grassLeft.png",
SPRITE_SCALING)
elif item == 2:
wall = arcade.Sprite("images/grassMid.png",
SPRITE_SCALING)
elif item == 3:
wall = arcade.Sprite("images/grassRight.png",
SPRITE_SCALING)
wall.right = column_index * 64
wall.top = (7 - row_index) * 64
self.all_sprites_list.append(wall)
self.wall_list.append(wall)
self.physics_engine = \
arcade.PhysicsEnginePlatformer(self.player_sprite,
self.wall_list,
gravity_constant=GRAVITY)
# Set the background color
arcade.set_background_color(arcade.color.AMAZON)
# Set the viewport boundaries
# These numbers set where we have 'scrolled' to.
self.view_left = 0
self.view_bottom = 0
self.game_over = False
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
# Draw all the sprites.
self.all_sprites_list.draw()
# Put the text on the screen.
# Adjust the text position based on the viewport so that we don't
# scroll the text too.
distance = self.view_left + self.player_sprite.right
output = "Distance: {}".format(distance)
arcade.draw_text(output, self.view_left + 10, self.view_bottom + 20,
arcade.color.WHITE, 14)
if self.game_over:
output = "Game Over"
arcade.draw_text(output, self.view_left + 200,
self.view_bottom + 200,
arcade.color.WHITE, 30)
def on_key_press(self, key, modifiers):
"""
        Called whenever a key is pressed.
"""
if key == arcade.key.UP:
if self.physics_engine.can_jump():
self.player_sprite.change_y = JUMP_SPEED
elif key == arcade.key.LEFT:
self.player_sprite.change_x = -MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player_sprite.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""
        Called whenever a key is released.
"""
if key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player_sprite.change_x = 0
def animate(self, delta_time):
""" Movement and game logic """
if self.view_left + self.player_sprite.right >= 5630:
self.game_over = True
# Call update on all sprites (The sprites don't do much in this
# example though.)
if not self.game_over:
self.physics_engine.update()
# --- Manage Scrolling ---
# Track if we need to change the viewport
changed = False
# Scroll left
left_bndry = self.view_left + VIEWPORT_MARGIN
if self.player_sprite.left < left_bndry:
self.view_left -= left_bndry - self.player_sprite.left
changed = True
# Scroll right
right_bndry = self.view_left + SCREEN_WIDTH - RIGHT_MARGIN
if self.player_sprite.right > right_bndry:
self.view_left += self.player_sprite.right - right_bndry
changed = True
# Scroll up
top_bndry = self.view_bottom + SCREEN_HEIGHT - VIEWPORT_MARGIN
if self.player_sprite.top > top_bndry:
self.view_bottom += self.player_sprite.top - top_bndry
changed = True
# Scroll down
bottom_bndry = self.view_bottom + VIEWPORT_MARGIN
if self.player_sprite.bottom < bottom_bndry:
self.view_bottom -= bottom_bndry - self.player_sprite.bottom
changed = True
# If we need to scroll, go ahead and do it.
if changed:
arcade.set_viewport(self.view_left,
SCREEN_WIDTH + self.view_left,
self.view_bottom,
SCREEN_HEIGHT + self.view_bottom)
window = MyApplication(SCREEN_WIDTH, SCREEN_HEIGHT)
window.setup()
arcade.run()
| mit | -5,013,089,162,620,566,000 | 30.242857 | 76 | 0.53757 | false | 3.954792 | false | false | false |
anderspitman/scikit-bio | skbio/sequence/distance.py | 1 | 5233 | """
Sequence distance metrics (:mod:`skbio.sequence.distance`)
==========================================================
.. currentmodule:: skbio.sequence.distance
This module contains functions for computing distances between scikit-bio
``Sequence`` objects. These functions can be used directly or supplied to other
parts of the scikit-bio API that accept a sequence distance metric as input,
such as :meth:`skbio.sequence.Sequence.distance` and
:meth:`skbio.stats.distance.DistanceMatrix.from_iterable`.
Functions
---------
.. autosummary::
:toctree: generated/
hamming
kmer_distance
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import scipy.spatial.distance
import skbio
from skbio.util._decorator import experimental
@experimental(as_of='0.4.2')
def hamming(seq1, seq2):
"""Compute Hamming distance between two sequences.
The Hamming distance between two equal-length sequences is the proportion
of differing characters.
Parameters
----------
seq1, seq2 : Sequence
Sequences to compute Hamming distance between.
Returns
-------
float
Hamming distance between `seq1` and `seq2`.
Raises
------
TypeError
If `seq1` and `seq2` are not ``Sequence`` instances.
TypeError
If `seq1` and `seq2` are not the same type.
ValueError
If `seq1` and `seq2` are not the same length.
See Also
--------
scipy.spatial.distance.hamming
Notes
-----
``np.nan`` will be returned if the sequences do not contain any characters.
This function does not make assumptions about the sequence alphabet in use.
Each sequence object's underlying sequence of characters are used to
compute Hamming distance. Characters that may be considered equivalent in
certain contexts (e.g., `-` and `.` as gap characters) are treated as
distinct characters when computing Hamming distance.
Examples
--------
>>> from skbio import Sequence
>>> from skbio.sequence.distance import hamming
>>> seq1 = Sequence('AGGGTA')
>>> seq2 = Sequence('CGTTTA')
>>> hamming(seq1, seq2)
0.5
"""
_check_seqs(seq1, seq2)
# Hamming requires equal length sequences. We are checking this here
# because the error you would get otherwise is cryptic.
if len(seq1) != len(seq2):
raise ValueError(
"Hamming distance can only be computed between sequences of equal "
"length (%d != %d)" % (len(seq1), len(seq2)))
# scipy throws a RuntimeWarning when computing Hamming distance on length 0
# input.
if not seq1:
distance = np.nan
else:
distance = scipy.spatial.distance.hamming(seq1.values, seq2.values)
return float(distance)
@experimental(as_of='0.4.2-dev')
def kmer_distance(seq1, seq2, k, overlap=True):
"""Compute the kmer distance between a pair of sequences
The kmer distance between two sequences is the fraction of kmers that are
unique to either sequence.
Parameters
----------
seq1, seq2 : Sequence
Sequences to compute kmer distance between.
k : int
The kmer length.
overlap : bool, optional
Defines whether the kmers should be overlapping or not.
Returns
-------
float
kmer distance between `seq1` and `seq2`.
Raises
------
ValueError
If `k` is less than 1.
TypeError
If `seq1` and `seq2` are not ``Sequence`` instances.
TypeError
If `seq1` and `seq2` are not the same type.
Notes
-----
kmer counts are not incorporated in this distance metric.
``np.nan`` will be returned if there are no kmers defined for the
sequences.
Examples
--------
    >>> from skbio import Sequence
    >>> from skbio.sequence.distance import kmer_distance
>>> seq1 = Sequence('ATCGGCGAT')
>>> seq2 = Sequence('GCAGATGTG')
>>> kmer_distance(seq1, seq2, 3) # doctest: +ELLIPSIS
0.9230769230...
"""
_check_seqs(seq1, seq2)
seq1_kmers = set(map(str, seq1.iter_kmers(k, overlap=overlap)))
seq2_kmers = set(map(str, seq2.iter_kmers(k, overlap=overlap)))
all_kmers = seq1_kmers | seq2_kmers
if not all_kmers:
return np.nan
shared_kmers = seq1_kmers & seq2_kmers
number_unique = len(all_kmers) - len(shared_kmers)
fraction_unique = number_unique / len(all_kmers)
return fraction_unique
def _check_seqs(seq1, seq2):
# Asserts both sequences are skbio.sequence objects
for seq in seq1, seq2:
if not isinstance(seq, skbio.Sequence):
raise TypeError(
"`seq1` and `seq2` must be Sequence instances, not %r"
% type(seq).__name__)
# Asserts sequences have the same type
if type(seq1) is not type(seq2):
raise TypeError(
"Sequences must have matching type. Type %r does not match type %r"
% (type(seq1).__name__, type(seq2).__name__))
| bsd-3-clause | -1,355,334,489,428,729,900 | 28.398876 | 79 | 0.623543 | false | 3.991609 | false | false | false |
kennedyshead/home-assistant | homeassistant/components/media_player/__init__.py | 1 | 39109 | """Component to interface with various media players."""
from __future__ import annotations
import asyncio
import base64
import collections
from contextlib import suppress
import datetime as dt
import functools as ft
import hashlib
import logging
import secrets
from typing import final
from urllib.parse import urlparse
from aiohttp import web
from aiohttp.hdrs import CACHE_CONTROL, CONTENT_TYPE
from aiohttp.typedefs import LooseHeaders
import async_timeout
import voluptuous as vol
from yarl import URL
from homeassistant.components import websocket_api
from homeassistant.components.http import KEY_AUTHENTICATED, HomeAssistantView
from homeassistant.components.websocket_api.const import (
ERR_NOT_FOUND,
ERR_NOT_SUPPORTED,
ERR_UNKNOWN_ERROR,
)
from homeassistant.const import (
HTTP_INTERNAL_SERVER_ERROR,
HTTP_NOT_FOUND,
HTTP_OK,
HTTP_UNAUTHORIZED,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_SEEK,
SERVICE_MEDIA_STOP,
SERVICE_REPEAT_SET,
SERVICE_SHUFFLE_SET,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
STATE_IDLE,
STATE_OFF,
STATE_PLAYING,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
datetime,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.network import get_url
from homeassistant.loader import bind_hass
from .const import (
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_GROUP_MEMBERS,
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_ENQUEUE,
ATTR_MEDIA_EPISODE,
ATTR_MEDIA_EXTRA,
ATTR_MEDIA_PLAYLIST,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_REPEAT,
ATTR_MEDIA_SEASON,
ATTR_MEDIA_SEEK_POSITION,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_SHUFFLE,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_SOUND_MODE,
ATTR_SOUND_MODE_LIST,
DOMAIN,
MEDIA_CLASS_DIRECTORY,
REPEAT_MODES,
SERVICE_CLEAR_PLAYLIST,
SERVICE_JOIN,
SERVICE_PLAY_MEDIA,
SERVICE_SELECT_SOUND_MODE,
SERVICE_SELECT_SOURCE,
SERVICE_UNJOIN,
SUPPORT_BROWSE_MEDIA,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_GROUPING,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_REPEAT_SET,
SUPPORT_SEEK,
SUPPORT_SELECT_SOUND_MODE,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from .errors import BrowseError
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
CACHE_IMAGES = "images"
CACHE_MAXSIZE = "maxsize"
CACHE_LOCK = "lock"
CACHE_URL = "url"
CACHE_CONTENT = "content"
ENTITY_IMAGE_CACHE = {CACHE_IMAGES: collections.OrderedDict(), CACHE_MAXSIZE: 16}
SCAN_INTERVAL = dt.timedelta(seconds=10)
DEVICE_CLASS_TV = "tv"
DEVICE_CLASS_SPEAKER = "speaker"
DEVICE_CLASS_RECEIVER = "receiver"
DEVICE_CLASSES = [DEVICE_CLASS_TV, DEVICE_CLASS_SPEAKER, DEVICE_CLASS_RECEIVER]
DEVICE_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(DEVICE_CLASSES))
MEDIA_PLAYER_PLAY_MEDIA_SCHEMA = {
vol.Required(ATTR_MEDIA_CONTENT_TYPE): cv.string,
vol.Required(ATTR_MEDIA_CONTENT_ID): cv.string,
vol.Optional(ATTR_MEDIA_ENQUEUE): cv.boolean,
vol.Optional(ATTR_MEDIA_EXTRA, default={}): dict,
}
ATTR_TO_PROPERTY = [
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_SEASON,
ATTR_MEDIA_EPISODE,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_PLAYLIST,
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_INPUT_SOURCE,
ATTR_SOUND_MODE,
ATTR_MEDIA_SHUFFLE,
ATTR_MEDIA_REPEAT,
]
@bind_hass
def is_on(hass, entity_id=None):
"""
Return true if specified media player entity_id is on.
    Check all media players if no entity_id is specified.
"""
entity_ids = [entity_id] if entity_id else hass.states.entity_ids(DOMAIN)
return any(
not hass.states.is_state(entity_id, STATE_OFF) for entity_id in entity_ids
)
def _rename_keys(**keys):
"""Create validator that renames keys.
Necessary because the service schema names do not match the command parameters.
Async friendly.
"""
def rename(value):
for to_key, from_key in keys.items():
if from_key in value:
value[to_key] = value.pop(from_key)
return value
return rename
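# Illustrative use of _rename_keys (a sketch, not part of the original module):
# the validator below would rename the service-schema key to the command
# parameter name before the handler sees it.
#
#   rename = _rename_keys(volume=ATTR_MEDIA_VOLUME_LEVEL)
#   rename({ATTR_MEDIA_VOLUME_LEVEL: 0.5})  # -> {"volume": 0.5}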
async def async_setup(hass, config):
"""Track states and offer events for media_players."""
component = hass.data[DOMAIN] = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL
)
hass.components.websocket_api.async_register_command(websocket_handle_thumbnail)
hass.components.websocket_api.async_register_command(websocket_browse_media)
hass.http.register_view(MediaPlayerImageView(component))
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_TURN_ON, {}, "async_turn_on", [SUPPORT_TURN_ON]
)
component.async_register_entity_service(
SERVICE_TURN_OFF, {}, "async_turn_off", [SUPPORT_TURN_OFF]
)
component.async_register_entity_service(
SERVICE_TOGGLE, {}, "async_toggle", [SUPPORT_TURN_OFF | SUPPORT_TURN_ON]
)
component.async_register_entity_service(
SERVICE_VOLUME_UP,
{},
"async_volume_up",
[SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP],
)
component.async_register_entity_service(
SERVICE_VOLUME_DOWN,
{},
"async_volume_down",
[SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP],
)
component.async_register_entity_service(
SERVICE_MEDIA_PLAY_PAUSE,
{},
"async_media_play_pause",
[SUPPORT_PLAY | SUPPORT_PAUSE],
)
component.async_register_entity_service(
SERVICE_MEDIA_PLAY, {}, "async_media_play", [SUPPORT_PLAY]
)
component.async_register_entity_service(
SERVICE_MEDIA_PAUSE, {}, "async_media_pause", [SUPPORT_PAUSE]
)
component.async_register_entity_service(
SERVICE_MEDIA_STOP, {}, "async_media_stop", [SUPPORT_STOP]
)
component.async_register_entity_service(
SERVICE_MEDIA_NEXT_TRACK, {}, "async_media_next_track", [SUPPORT_NEXT_TRACK]
)
component.async_register_entity_service(
SERVICE_MEDIA_PREVIOUS_TRACK,
{},
"async_media_previous_track",
[SUPPORT_PREVIOUS_TRACK],
)
component.async_register_entity_service(
SERVICE_CLEAR_PLAYLIST, {}, "async_clear_playlist", [SUPPORT_CLEAR_PLAYLIST]
)
component.async_register_entity_service(
SERVICE_VOLUME_SET,
vol.All(
cv.make_entity_service_schema(
{vol.Required(ATTR_MEDIA_VOLUME_LEVEL): cv.small_float}
),
_rename_keys(volume=ATTR_MEDIA_VOLUME_LEVEL),
),
"async_set_volume_level",
[SUPPORT_VOLUME_SET],
)
component.async_register_entity_service(
SERVICE_VOLUME_MUTE,
vol.All(
cv.make_entity_service_schema(
{vol.Required(ATTR_MEDIA_VOLUME_MUTED): cv.boolean}
),
_rename_keys(mute=ATTR_MEDIA_VOLUME_MUTED),
),
"async_mute_volume",
[SUPPORT_VOLUME_MUTE],
)
component.async_register_entity_service(
SERVICE_MEDIA_SEEK,
vol.All(
cv.make_entity_service_schema(
{vol.Required(ATTR_MEDIA_SEEK_POSITION): cv.positive_float}
),
_rename_keys(position=ATTR_MEDIA_SEEK_POSITION),
),
"async_media_seek",
[SUPPORT_SEEK],
)
component.async_register_entity_service(
SERVICE_JOIN,
{vol.Required(ATTR_GROUP_MEMBERS): vol.All(cv.ensure_list, [cv.entity_id])},
"async_join_players",
[SUPPORT_GROUPING],
)
component.async_register_entity_service(
SERVICE_SELECT_SOURCE,
{vol.Required(ATTR_INPUT_SOURCE): cv.string},
"async_select_source",
[SUPPORT_SELECT_SOURCE],
)
component.async_register_entity_service(
SERVICE_SELECT_SOUND_MODE,
{vol.Required(ATTR_SOUND_MODE): cv.string},
"async_select_sound_mode",
[SUPPORT_SELECT_SOUND_MODE],
)
component.async_register_entity_service(
SERVICE_PLAY_MEDIA,
vol.All(
cv.make_entity_service_schema(MEDIA_PLAYER_PLAY_MEDIA_SCHEMA),
_rename_keys(
media_type=ATTR_MEDIA_CONTENT_TYPE,
media_id=ATTR_MEDIA_CONTENT_ID,
enqueue=ATTR_MEDIA_ENQUEUE,
),
),
"async_play_media",
[SUPPORT_PLAY_MEDIA],
)
component.async_register_entity_service(
SERVICE_SHUFFLE_SET,
{vol.Required(ATTR_MEDIA_SHUFFLE): cv.boolean},
"async_set_shuffle",
[SUPPORT_SHUFFLE_SET],
)
component.async_register_entity_service(
SERVICE_UNJOIN, {}, "async_unjoin_player", [SUPPORT_GROUPING]
)
component.async_register_entity_service(
SERVICE_REPEAT_SET,
{vol.Required(ATTR_MEDIA_REPEAT): vol.In(REPEAT_MODES)},
"async_set_repeat",
[SUPPORT_REPEAT_SET],
)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class MediaPlayerEntity(Entity):
"""ABC for media player entities."""
_access_token: str | None = None
_attr_app_id: str | None = None
_attr_app_name: str | None = None
_attr_group_members: list[str] | None = None
_attr_is_volume_muted: bool | None = None
_attr_media_album_artist: str | None = None
_attr_media_album_name: str | None = None
_attr_media_artist: str | None = None
_attr_media_channel: str | None = None
_attr_media_content_id: str | None = None
_attr_media_content_type: str | None = None
_attr_media_duration: int | None = None
_attr_media_episode: str | None = None
_attr_media_image_hash: str | None
_attr_media_image_remotely_accessible: bool = False
_attr_media_image_url: str | None = None
_attr_media_playlist: str | None = None
_attr_media_position_updated_at: dt.datetime | None = None
_attr_media_position: int | None = None
_attr_media_season: str | None = None
_attr_media_series_title: str | None = None
_attr_media_title: str | None = None
_attr_media_track: int | None = None
_attr_repeat: str | None = None
_attr_shuffle: bool | None = None
_attr_sound_mode_list: list[str] | None = None
_attr_sound_mode: str | None = None
_attr_source_list: list[str] | None = None
_attr_source: str | None = None
_attr_state: str | None = None
_attr_supported_features: int = 0
_attr_volume_level: float | None = None
# Implement these for your media player
@property
def state(self) -> str | None:
"""State of the player."""
return self._attr_state
@property
def access_token(self) -> str:
"""Access token for this media player."""
if self._access_token is None:
self._access_token = secrets.token_hex(32)
return self._access_token
@property
def volume_level(self) -> float | None:
"""Volume level of the media player (0..1)."""
return self._attr_volume_level
@property
def is_volume_muted(self) -> bool | None:
"""Boolean if volume is currently muted."""
return self._attr_is_volume_muted
@property
def media_content_id(self) -> str | None:
"""Content ID of current playing media."""
return self._attr_media_content_id
@property
def media_content_type(self) -> str | None:
"""Content type of current playing media."""
return self._attr_media_content_type
@property
def media_duration(self) -> int | None:
"""Duration of current playing media in seconds."""
return self._attr_media_duration
@property
def media_position(self) -> int | None:
"""Position of current playing media in seconds."""
return self._attr_media_position
@property
def media_position_updated_at(self) -> dt.datetime | None:
"""When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
return self._attr_media_position_updated_at
@property
def media_image_url(self) -> str | None:
"""Image url of current playing media."""
return self._attr_media_image_url
@property
def media_image_remotely_accessible(self) -> bool:
"""If the image url is remotely accessible."""
return self._attr_media_image_remotely_accessible
@property
def media_image_hash(self) -> str | None:
"""Hash value for media image."""
if hasattr(self, "_attr_media_image_hash"):
return self._attr_media_image_hash
url = self.media_image_url
if url is not None:
return hashlib.sha256(url.encode("utf-8")).hexdigest()[:16]
return None
async def async_get_media_image(self):
"""Fetch media image of current playing image."""
url = self.media_image_url
if url is None:
return None, None
return await self._async_fetch_image_from_cache(url)
async def async_get_browse_image(
self,
media_content_type: str,
media_content_id: str,
media_image_id: str | None = None,
) -> tuple[str | None, str | None]:
"""
Optionally fetch internally accessible image for media browser.
Must be implemented by integration.
"""
return None, None
@property
def media_title(self) -> str | None:
"""Title of current playing media."""
return self._attr_media_title
@property
def media_artist(self) -> str | None:
"""Artist of current playing media, music track only."""
return self._attr_media_artist
@property
def media_album_name(self) -> str | None:
"""Album name of current playing media, music track only."""
return self._attr_media_album_name
@property
def media_album_artist(self) -> str | None:
"""Album artist of current playing media, music track only."""
return self._attr_media_album_artist
@property
def media_track(self) -> int | None:
"""Track number of current playing media, music track only."""
return self._attr_media_track
@property
def media_series_title(self) -> str | None:
"""Title of series of current playing media, TV show only."""
return self._attr_media_series_title
@property
def media_season(self) -> str | None:
"""Season of current playing media, TV show only."""
return self._attr_media_season
@property
def media_episode(self) -> str | None:
"""Episode of current playing media, TV show only."""
return self._attr_media_episode
@property
def media_channel(self) -> str | None:
"""Channel currently playing."""
return self._attr_media_channel
@property
def media_playlist(self) -> str | None:
"""Title of Playlist currently playing."""
return self._attr_media_playlist
@property
def app_id(self) -> str | None:
"""ID of the current running app."""
return self._attr_app_id
@property
def app_name(self) -> str | None:
"""Name of the current running app."""
return self._attr_app_name
@property
def source(self) -> str | None:
"""Name of the current input source."""
return self._attr_source
@property
def source_list(self) -> list[str] | None:
"""List of available input sources."""
return self._attr_source_list
@property
def sound_mode(self) -> str | None:
"""Name of the current sound mode."""
return self._attr_sound_mode
@property
def sound_mode_list(self) -> list[str] | None:
"""List of available sound modes."""
return self._attr_sound_mode_list
@property
def shuffle(self) -> bool | None:
"""Boolean if shuffle is enabled."""
return self._attr_shuffle
@property
def repeat(self) -> str | None:
"""Return current repeat mode."""
return self._attr_repeat
@property
def group_members(self) -> list[str] | None:
"""List of members which are currently grouped together."""
return self._attr_group_members
@property
def supported_features(self) -> int:
"""Flag media player features that are supported."""
return self._attr_supported_features
def turn_on(self):
"""Turn the media player on."""
raise NotImplementedError()
async def async_turn_on(self):
"""Turn the media player on."""
await self.hass.async_add_executor_job(self.turn_on)
def turn_off(self):
"""Turn the media player off."""
raise NotImplementedError()
async def async_turn_off(self):
"""Turn the media player off."""
await self.hass.async_add_executor_job(self.turn_off)
def mute_volume(self, mute):
"""Mute the volume."""
raise NotImplementedError()
async def async_mute_volume(self, mute):
"""Mute the volume."""
await self.hass.async_add_executor_job(self.mute_volume, mute)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
raise NotImplementedError()
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
await self.hass.async_add_executor_job(self.set_volume_level, volume)
def media_play(self):
"""Send play command."""
raise NotImplementedError()
async def async_media_play(self):
"""Send play command."""
await self.hass.async_add_executor_job(self.media_play)
def media_pause(self):
"""Send pause command."""
raise NotImplementedError()
async def async_media_pause(self):
"""Send pause command."""
await self.hass.async_add_executor_job(self.media_pause)
def media_stop(self):
"""Send stop command."""
raise NotImplementedError()
async def async_media_stop(self):
"""Send stop command."""
await self.hass.async_add_executor_job(self.media_stop)
def media_previous_track(self):
"""Send previous track command."""
raise NotImplementedError()
async def async_media_previous_track(self):
"""Send previous track command."""
await self.hass.async_add_executor_job(self.media_previous_track)
def media_next_track(self):
"""Send next track command."""
raise NotImplementedError()
async def async_media_next_track(self):
"""Send next track command."""
await self.hass.async_add_executor_job(self.media_next_track)
def media_seek(self, position):
"""Send seek command."""
raise NotImplementedError()
async def async_media_seek(self, position):
"""Send seek command."""
await self.hass.async_add_executor_job(self.media_seek, position)
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
raise NotImplementedError()
async def async_play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
await self.hass.async_add_executor_job(
ft.partial(self.play_media, media_type, media_id, **kwargs)
)
def select_source(self, source):
"""Select input source."""
raise NotImplementedError()
async def async_select_source(self, source):
"""Select input source."""
await self.hass.async_add_executor_job(self.select_source, source)
def select_sound_mode(self, sound_mode):
"""Select sound mode."""
raise NotImplementedError()
async def async_select_sound_mode(self, sound_mode):
"""Select sound mode."""
await self.hass.async_add_executor_job(self.select_sound_mode, sound_mode)
def clear_playlist(self):
"""Clear players playlist."""
raise NotImplementedError()
async def async_clear_playlist(self):
"""Clear players playlist."""
await self.hass.async_add_executor_job(self.clear_playlist)
def set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
raise NotImplementedError()
async def async_set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
await self.hass.async_add_executor_job(self.set_shuffle, shuffle)
def set_repeat(self, repeat):
"""Set repeat mode."""
raise NotImplementedError()
async def async_set_repeat(self, repeat):
"""Set repeat mode."""
await self.hass.async_add_executor_job(self.set_repeat, repeat)
# No need to overwrite these.
@property
def support_play(self):
"""Boolean if play is supported."""
return bool(self.supported_features & SUPPORT_PLAY)
@property
def support_pause(self):
"""Boolean if pause is supported."""
return bool(self.supported_features & SUPPORT_PAUSE)
@property
def support_stop(self):
"""Boolean if stop is supported."""
return bool(self.supported_features & SUPPORT_STOP)
@property
def support_seek(self):
"""Boolean if seek is supported."""
return bool(self.supported_features & SUPPORT_SEEK)
@property
def support_volume_set(self):
"""Boolean if setting volume is supported."""
return bool(self.supported_features & SUPPORT_VOLUME_SET)
@property
def support_volume_mute(self):
"""Boolean if muting volume is supported."""
return bool(self.supported_features & SUPPORT_VOLUME_MUTE)
@property
def support_previous_track(self):
"""Boolean if previous track command supported."""
return bool(self.supported_features & SUPPORT_PREVIOUS_TRACK)
@property
def support_next_track(self):
"""Boolean if next track command supported."""
return bool(self.supported_features & SUPPORT_NEXT_TRACK)
@property
def support_play_media(self):
"""Boolean if play media command supported."""
return bool(self.supported_features & SUPPORT_PLAY_MEDIA)
@property
def support_select_source(self):
"""Boolean if select source command supported."""
return bool(self.supported_features & SUPPORT_SELECT_SOURCE)
@property
def support_select_sound_mode(self):
"""Boolean if select sound mode command supported."""
return bool(self.supported_features & SUPPORT_SELECT_SOUND_MODE)
@property
def support_clear_playlist(self):
"""Boolean if clear playlist command supported."""
return bool(self.supported_features & SUPPORT_CLEAR_PLAYLIST)
@property
def support_shuffle_set(self):
"""Boolean if shuffle is supported."""
return bool(self.supported_features & SUPPORT_SHUFFLE_SET)
@property
def support_grouping(self):
"""Boolean if player grouping is supported."""
return bool(self.supported_features & SUPPORT_GROUPING)
async def async_toggle(self):
"""Toggle the power on the media player."""
if hasattr(self, "toggle"):
# pylint: disable=no-member
await self.hass.async_add_executor_job(self.toggle)
return
if self.state in [STATE_OFF, STATE_IDLE]:
await self.async_turn_on()
else:
await self.async_turn_off()
async def async_volume_up(self):
"""Turn volume up for media player.
This method is a coroutine.
"""
if hasattr(self, "volume_up"):
# pylint: disable=no-member
await self.hass.async_add_executor_job(self.volume_up)
return
if self.volume_level < 1 and self.supported_features & SUPPORT_VOLUME_SET:
await self.async_set_volume_level(min(1, self.volume_level + 0.1))
async def async_volume_down(self):
"""Turn volume down for media player.
This method is a coroutine.
"""
if hasattr(self, "volume_down"):
# pylint: disable=no-member
await self.hass.async_add_executor_job(self.volume_down)
return
if self.volume_level > 0 and self.supported_features & SUPPORT_VOLUME_SET:
await self.async_set_volume_level(max(0, self.volume_level - 0.1))
async def async_media_play_pause(self):
"""Play or pause the media player."""
if hasattr(self, "media_play_pause"):
# pylint: disable=no-member
await self.hass.async_add_executor_job(self.media_play_pause)
return
if self.state == STATE_PLAYING:
await self.async_media_pause()
else:
await self.async_media_play()
@property
def entity_picture(self):
"""Return image of the media playing."""
if self.state == STATE_OFF:
return None
if self.media_image_remotely_accessible:
return self.media_image_url
return self.media_image_local
@property
def media_image_local(self):
"""Return local url to media image."""
image_hash = self.media_image_hash
if image_hash is None:
return None
return (
f"/api/media_player_proxy/{self.entity_id}?"
f"token={self.access_token}&cache={image_hash}"
)
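    # For illustration only: for an entity "media_player.living_room" the
    # property above yields a proxy URL of the form (values hypothetical):
    #   /api/media_player_proxy/media_player.living_room?token=<hex>&cache=<hash>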
@property
def capability_attributes(self):
"""Return capability attributes."""
supported_features = self.supported_features or 0
data = {}
if supported_features & SUPPORT_SELECT_SOURCE:
source_list = self.source_list
if source_list:
data[ATTR_INPUT_SOURCE_LIST] = source_list
if supported_features & SUPPORT_SELECT_SOUND_MODE:
sound_mode_list = self.sound_mode_list
if sound_mode_list:
data[ATTR_SOUND_MODE_LIST] = sound_mode_list
return data
@final
@property
def state_attributes(self):
"""Return the state attributes."""
if self.state == STATE_OFF:
return None
state_attr = {}
for attr in ATTR_TO_PROPERTY:
value = getattr(self, attr)
if value is not None:
state_attr[attr] = value
if self.media_image_remotely_accessible:
state_attr["entity_picture_local"] = self.media_image_local
if self.support_grouping:
state_attr[ATTR_GROUP_MEMBERS] = self.group_members
return state_attr
async def async_browse_media(
self,
media_content_type: str | None = None,
media_content_id: str | None = None,
) -> BrowseMedia:
"""Return a BrowseMedia instance.
The BrowseMedia instance will be used by the
"media_player/browse_media" websocket command.
"""
raise NotImplementedError()
def join_players(self, group_members):
"""Join `group_members` as a player group with the current player."""
raise NotImplementedError()
async def async_join_players(self, group_members):
"""Join `group_members` as a player group with the current player."""
await self.hass.async_add_executor_job(self.join_players, group_members)
def unjoin_player(self):
"""Remove this player from any group."""
raise NotImplementedError()
async def async_unjoin_player(self):
"""Remove this player from any group."""
await self.hass.async_add_executor_job(self.unjoin_player)
async def _async_fetch_image_from_cache(self, url):
"""Fetch image.
Images are cached in memory (the images are typically 10-100kB in size).
"""
cache_images = ENTITY_IMAGE_CACHE[CACHE_IMAGES]
cache_maxsize = ENTITY_IMAGE_CACHE[CACHE_MAXSIZE]
if urlparse(url).hostname is None:
url = f"{get_url(self.hass)}{url}"
if url not in cache_images:
cache_images[url] = {CACHE_LOCK: asyncio.Lock()}
async with cache_images[url][CACHE_LOCK]:
if CACHE_CONTENT in cache_images[url]:
return cache_images[url][CACHE_CONTENT]
(content, content_type) = await self._async_fetch_image(url)
async with cache_images[url][CACHE_LOCK]:
cache_images[url][CACHE_CONTENT] = content, content_type
while len(cache_images) > cache_maxsize:
cache_images.popitem(last=False)
return content, content_type
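    # Sketch of the cache behaviour above (sizes assumed from the constants):
    # ENTITY_IMAGE_CACHE[CACHE_IMAGES] is an OrderedDict keyed by URL, so
    # popitem(last=False) evicts the oldest entry once more than
    # CACHE_MAXSIZE (16) images are held, i.e. simple FIFO eviction.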
async def _async_fetch_image(self, url):
"""Retrieve an image."""
content, content_type = (None, None)
websession = async_get_clientsession(self.hass)
with suppress(asyncio.TimeoutError), async_timeout.timeout(10):
response = await websession.get(url)
if response.status == HTTP_OK:
content = await response.read()
content_type = response.headers.get(CONTENT_TYPE)
if content_type:
content_type = content_type.split(";")[0]
if content is None:
_LOGGER.warning("Error retrieving proxied image from %s", url)
return content, content_type
def get_browse_image_url(
self,
media_content_type: str,
media_content_id: str,
media_image_id: str | None = None,
) -> str:
"""Generate an url for a media browser image."""
url_path = (
f"/api/media_player_proxy/{self.entity_id}/browse_media"
f"/{media_content_type}/{media_content_id}"
)
url_query = {"token": self.access_token}
if media_image_id:
url_query["media_image_id"] = media_image_id
return str(URL(url_path).with_query(url_query))
class MediaPlayerImageView(HomeAssistantView):
"""Media player view to serve an image."""
requires_auth = False
url = "/api/media_player_proxy/{entity_id}"
name = "api:media_player:image"
extra_urls = [
url + "/browse_media/{media_content_type}/{media_content_id}",
]
def __init__(self, component):
"""Initialize a media player view."""
self.component = component
async def get(
self,
request: web.Request,
entity_id: str,
media_content_type: str | None = None,
media_content_id: str | None = None,
) -> web.Response:
"""Start a get request."""
player = self.component.get_entity(entity_id)
if player is None:
status = HTTP_NOT_FOUND if request[KEY_AUTHENTICATED] else HTTP_UNAUTHORIZED
return web.Response(status=status)
authenticated = (
request[KEY_AUTHENTICATED]
or request.query.get("token") == player.access_token
)
if not authenticated:
return web.Response(status=HTTP_UNAUTHORIZED)
if media_content_type and media_content_id:
media_image_id = request.query.get("media_image_id")
data, content_type = await player.async_get_browse_image(
media_content_type, media_content_id, media_image_id
)
else:
data, content_type = await player.async_get_media_image()
if data is None:
return web.Response(status=HTTP_INTERNAL_SERVER_ERROR)
headers: LooseHeaders = {CACHE_CONTROL: "max-age=3600"}
return web.Response(body=data, content_type=content_type, headers=headers)
@websocket_api.websocket_command(
{
vol.Required("type"): "media_player_thumbnail",
vol.Required("entity_id"): cv.entity_id,
}
)
@websocket_api.async_response
async def websocket_handle_thumbnail(hass, connection, msg):
"""Handle get media player cover command.
Async friendly.
"""
component = hass.data[DOMAIN]
player = component.get_entity(msg["entity_id"])
if player is None:
connection.send_message(
websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found")
)
return
_LOGGER.warning(
"The websocket command media_player_thumbnail is deprecated. Use /api/media_player_proxy instead"
)
data, content_type = await player.async_get_media_image()
if data is None:
connection.send_message(
websocket_api.error_message(
msg["id"], "thumbnail_fetch_failed", "Failed to fetch thumbnail"
)
)
return
await connection.send_big_result(
msg["id"],
{
"content_type": content_type,
"content": base64.b64encode(data).decode("utf-8"),
},
)
@websocket_api.websocket_command(
{
vol.Required("type"): "media_player/browse_media",
vol.Required("entity_id"): cv.entity_id,
vol.Inclusive(
ATTR_MEDIA_CONTENT_TYPE,
"media_ids",
"media_content_type and media_content_id must be provided together",
): str,
vol.Inclusive(
ATTR_MEDIA_CONTENT_ID,
"media_ids",
"media_content_type and media_content_id must be provided together",
): str,
}
)
@websocket_api.async_response
async def websocket_browse_media(hass, connection, msg):
"""
Browse media available to the media_player entity.
To use, media_player integrations can implement MediaPlayerEntity.async_browse_media()
"""
component = hass.data[DOMAIN]
    player: MediaPlayerEntity | None = component.get_entity(msg["entity_id"])
if player is None:
connection.send_error(msg["id"], "entity_not_found", "Entity not found")
return
if not player.supported_features & SUPPORT_BROWSE_MEDIA:
connection.send_message(
websocket_api.error_message(
msg["id"], ERR_NOT_SUPPORTED, "Player does not support browsing media"
)
)
return
media_content_type = msg.get(ATTR_MEDIA_CONTENT_TYPE)
media_content_id = msg.get(ATTR_MEDIA_CONTENT_ID)
try:
payload = await player.async_browse_media(media_content_type, media_content_id)
except NotImplementedError:
_LOGGER.error(
"%s allows media browsing but its integration (%s) does not",
player.entity_id,
player.platform.platform_name,
)
connection.send_message(
websocket_api.error_message(
msg["id"],
ERR_NOT_SUPPORTED,
"Integration does not support browsing media",
)
)
return
except BrowseError as err:
connection.send_message(
websocket_api.error_message(msg["id"], ERR_UNKNOWN_ERROR, str(err))
)
return
# For backwards compat
if isinstance(payload, BrowseMedia):
payload = payload.as_dict()
else:
_LOGGER.warning("Browse Media should use new BrowseMedia class")
connection.send_result(msg["id"], payload)
class MediaPlayerDevice(MediaPlayerEntity):
"""ABC for media player devices (for backwards compatibility)."""
def __init_subclass__(cls, **kwargs):
"""Print deprecation warning."""
super().__init_subclass__(**kwargs)
_LOGGER.warning(
"MediaPlayerDevice is deprecated, modify %s to extend MediaPlayerEntity",
cls.__name__,
)
class BrowseMedia:
"""Represent a browsable media file."""
def __init__(
self,
*,
media_class: str,
media_content_id: str,
media_content_type: str,
title: str,
can_play: bool,
can_expand: bool,
children: list[BrowseMedia] | None = None,
children_media_class: str | None = None,
thumbnail: str | None = None,
) -> None:
"""Initialize browse media item."""
self.media_class = media_class
self.media_content_id = media_content_id
self.media_content_type = media_content_type
self.title = title
self.can_play = can_play
self.can_expand = can_expand
self.children = children
self.children_media_class = children_media_class
self.thumbnail = thumbnail
def as_dict(self, *, parent: bool = True) -> dict:
"""Convert Media class to browse media dictionary."""
if self.children_media_class is None:
self.calculate_children_class()
response = {
"title": self.title,
"media_class": self.media_class,
"media_content_type": self.media_content_type,
"media_content_id": self.media_content_id,
"can_play": self.can_play,
"can_expand": self.can_expand,
"children_media_class": self.children_media_class,
"thumbnail": self.thumbnail,
}
if not parent:
return response
if self.children:
response["children"] = [
child.as_dict(parent=False) for child in self.children
]
else:
response["children"] = []
return response
def calculate_children_class(self) -> None:
"""Count the children media classes and calculate the correct class."""
if self.children is None or len(self.children) == 0:
return
self.children_media_class = MEDIA_CLASS_DIRECTORY
proposed_class = self.children[0].media_class
if all(child.media_class == proposed_class for child in self.children):
self.children_media_class = proposed_class
| apache-2.0 | 7,419,448,609,892,268,000 | 30.31225 | 105 | 0.620727 | false | 3.920702 | false | false | false |
Bdanilko/EdPy | src/lib/program.py | 1 | 21793 | #!/usr/bin/env python2
# * **************************************************************** **
# File: program.py
# Requires: Python 2.7+ (but not Python 3.0+)
# Note: For history, changes and dates for this file, consult git.
# Author: Brian Danilko, Likeable Software ([email protected])
# Copyright 2015-2017 Microbric Pty Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License (in the doc/licenses directory)
# for more details.
#
# * **************************************************************** */
""" Module contains Objects that represent the Ed.Py program """
from __future__ import print_function
from __future__ import absolute_import
class EdPyError(Exception):
def __init__(self):
pass
class ParseError(EdPyError):
def __init__(self, rawmsg=""):
self.rawmsg = rawmsg
class OptError(EdPyError):
def __init__(self, rawmsg=""):
self.rawmsg = rawmsg
class CompileError(EdPyError):
def __init__(self, rawmsg=""):
self.rawmsg = rawmsg
class AssemblerError(EdPyError):
def __init__(self, rawmsg=""):
self.rawmsg = rawmsg
class UnclassifiedError(Exception):
def __init__(self, rawmsg):
self.rawmsg = rawmsg
class Marker(object):
"""Mark each source line (but not worrying about column number)"""
def __init__(self, line, col=None):
self.kind = "Marker"
self.line = line
self.col = col
def GetValues(self):
return []
def GetTarget(self):
return None
def __repr__(self):
return "<program.Marker source line:{0}>".format(self.line)
class ControlMarker(object):
"""Marks start/else/end of If structures, While loops, For loops
and Boolean Checks (for short-circuit evaluation). This marks a
series of locations that tests can jump to."""
def __init__(self, markerNumber, name, end="start"):
self.kind = "ControlMarker"
self.num = markerNumber
self.name = name # string - type of loop: "If", "While", "For", "Or", "And"
self.end = end # a string - one of "start", "else", "end"
self.CheckData()
def GetNumber(self):
return self.num
def CheckData(self):
if (self.name not in ("If", "While", "For", "Or", "And")):
raise UnclassifiedError("Invalid program.ControlMarker() name.")
if (self.end not in ("start", "else", "end")):
raise UnclassifiedError("Invalid program.ControlMarker() end.")
def GetValues(self):
return []
def GetTarget(self):
return None
def __repr__(self):
msg = "<program.ControlMarker marker:{0} {1} {2}>".format(self.num, self.name, self.end)
return msg
class LoopControl(object):
"""Used at the top of If and While loops (where a test needs to be evaluated).
The markerNumber is the same as used in ControlMarkers, so jumps to locations
marked by the corresponding ControlMarker will be done."""
def __init__(self, markerNumber, name=None, test=None):
self.kind = "LoopControl"
self.num = markerNumber
self.name = name # a string "If", "While"
self.test = test # a Value object. if evaluates to 0 then False, else True
def GetValues(self):
return [self.test]
def GetTarget(self):
return None
def __repr__(self):
msg = "<program.LoopControl {0}, name:{1}, test:{2}>".format(
self.num, self.name, self.test)
return msg
class LoopModifier(object):
"""Mark, inside ControlMarkers, Breaks and Continues. As the markerNumber
is the same as the corresponding ControlMarker markerNumber, jumps to the
"start" or "end" is easy."""
def __init__(self, markerNumber, name=None):
self.kind = "LoopModifier"
self.num = markerNumber
self.name = name # a string "Pass", "Break", "Continue"
def GetValues(self):
return []
def GetTarget(self):
return None
def __repr__(self):
msg = "<program.LoopModifier {0}, name:{1}>".format(
self.num, self.name)
return msg
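# A hedged example of how a loop is expected to lower into these objects
# (marker number 3 is arbitrary; this is a sketch, not real compiler output):
#
#   while x < 10:        ControlMarker(3, "While", "start")
#       ...              LoopControl(3, "While", test=<Value for x < 10>)
#       break            LoopModifier(3, "Break")   # jumps to the "end" marker
#                        ControlMarker(3, "While", "end")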
class ForControl(object):
"""In a for loop, this will check that arrayValue is still inside
the array. If not a jump to the "end" of the corresponding ControlMarker
will be made."""
def __init__(self, markerNumber, arrayValue=None,
constantLimit=None, currentValue=None):
self.kind = "ForControl"
self.num = markerNumber
self.arrayValue = arrayValue # a value with name and iVariable
self.constantLimit = constantLimit # a value
self.currentValue = currentValue # a value
if ((self.arrayValue is None and self.constantLimit is None) or
(self.arrayValue is not None and self.constantLimit is not None) or
(self.currentValue is None and self.constantLimit is not None) or
(self.currentValue is not None and self.constantLimit is None)):
raise UnclassifiedError("Invalid program.ForControl() condition.")
def IsRange(self):
return self.constantLimit is not None
def IsArray(self):
return self.arrayValue is not None
def GetValues(self):
if (self.IsArray()):
return [self.arrayValue]
else:
return [self.constantLimit, self.currentValue]
def GetTarget(self):
return None
def __repr__(self):
msg = "<program.ForControl {0}, ".format(self.num)
if (self.IsArray()):
msg += "arrayValue:{0}>".format(self.arrayValue)
else:
msg += "constantLimit:{0}, currentValue:{1}>".format(self.constantLimit, self.currentValue)
return msg
class BoolCheck(object):
"""In a BoolOp, there is a need to short-curcuit evaluation on pass (or) or
failure (and). This object is used in each location where a value is
checked, and possible short-curcuit eval. may require a jump to the
"end" of the corresponding ControlMarker"""
def __init__(self, markerNumber, op=None, value=None, target=None):
"""An binary operation on constants or variables, assigned to a variable"""
self.kind = "BoolCheck"
self.num = markerNumber
self.op = op # a string - the boolean op ("Or", "And", "Done")
# Done signifies to put the non-shortcircuit value in target
self.value = value # a Value object which has the left result of the op
self.target = target # a Value object which gets the result on short-circuit
def GetValues(self):
return [self.value]
def GetTarget(self):
return self.target
def __repr__(self):
return "<program.BoolCheck {0} {1} check:{2}, target{3}>".format(
self.num, self.op, self.value, self.target)
class Value(object):
"""Stores an integer variable or constant or string constant, and depending on where it is used
in the other objects, can represent a STORE or a LOAD. Note that for a
STORE, this object can not represent a constant"""
def __init__(self, constant=None, name=None, iConstant=None, iVariable=None,
strConst=None, listConst=None,
tsRef=None, listRef=None, objectRef=None):
self.kind = "Value"
self.name = name # The name of the variable
self.indexConstant = iConstant # if not None, then the value is a slice at this index
self.indexVariable = iVariable
self.constant = constant # if not None, then this is the value (integer)
self.strConst = strConst # if not None, then a string
self.listConst = listConst # if not None, then a list
self.tsRef = tsRef # if not None, then a reference to a tunestring variable
self.listRef = listRef # if not None, then a reference to a list variable
self.objectRef = objectRef # if not None, then a reference to an object variable
self.loopTempStart = 9999 # All temps above this number are loop control temps
# check that the object has been created consistently
if (((self.IsIntConst()) and
((self.name is not None) or self.IsSlice() or
self.IsStrConst() or self.IsListConst() or self.IsRef())) or
((self.IsStrConst()) and
((self.name is not None) or self.IsSlice() or self.IsRef() or
self.IsListConst() or self.IsIntConst())) or
((self.IsListConst()) and
((self.name is not None) or self.IsSlice() or self.IsRef() or
self.IsStrConst() or self.IsIntConst())) or
(self.IsRef() and
((self.name is not None) or self.IsSlice() or
self.IsStrConst() or self.IsListConst() or self.IsIntConst())) or
((self.indexConstant is not None) and (self.indexVariable is not None)) or
((self.indexConstant is not None) and (self.name is None)) or
((self.indexVariable is not None) and (self.name is None)) or
((self.tsRef is not None) and
((self.listRef is not None) or (self.objectRef is not None))) or
((self.listRef is not None) and
((self.tsRef is not None) or (self.objectRef is not None))) or
((self.objectRef is not None) and
((self.listRef is not None) or (self.tsRef is not None)))):
raise UnclassifiedError("Invalid program.Value() constructor arguments")
def IsIntConst(self):
return self.constant is not None
def IsStrConst(self):
return (self.strConst is not None)
def IsListConst(self):
return (self.listConst is not None)
def IsTSRef(self):
return self.tsRef is not None
def IsListRef(self):
return self.listRef is not None
def IsObjRef(self):
return self.objectRef is not None
def IsRef(self):
return self.IsTSRef() or self.IsListRef() or self.IsObjRef()
def IsConstant(self):
return self.IsIntConst() or self.IsStrConst() or self.IsListConst()
def IsSimpleVar(self):
return (not (self.IsConstant() or self.IsSlice() or self.IsRef()))
def IsSlice(self):
return self.indexConstant is not None or self.indexVariable is not None
def IsDotted(self):
if (not self.IsTemp()):
            left, sep, right = self.name.partition(".")
if (right != ""):
return True
return False
def IsTemp(self):
if self.IsSimpleVar():
if type(self.name) is int:
return True
return False
def IsSimpleTemp(self):
return self.IsTemp() and (self.name < self.loopTempStart)
def IsSliceWithSimpleTempIndex(self):
return (self.IsSlice() and self.indexVariable is not None and
type(self.indexVariable) is int and (self.indexVariable < self.loopTempStart))
def IsSliceWithVarIndex(self):
return self.IsSlice() and self.indexVariable is not None and type(self.indexVariable) is not int
def IsAssignable(self):
return not (self.IsRef() or self.IsConstant())
def UsesValue(self, otherValue):
if (otherValue.IsSimpleVar()):
if ((self.IsSimpleVar() and self.name == otherValue.name) or
(self.IsSlice() and self.indexVariable == otherValue.name)):
return True
elif (otherValue.IsSlice()):
return self == otherValue
return False
def Name(self):
if self.IsConstant():
return "????"
elif not self.IsSlice():
if type(self.name) is int:
return "TEMP-" + str(self.name)
else:
return self.name
elif self.indexConstant is not None:
return self.name + "[" + str(self.indexConstant) + "]"
elif type(self.indexVariable) is int:
return self.name + "[TEMP-" + str(self.indexVariable) + "]"
else:
return self.name + "[" + self.indexVariable + "]"
def __eq__(self, rhs):
return ((self.kind == rhs.kind) and
(self.name == rhs.name) and
(self.indexConstant == rhs.indexConstant) and
(self.indexVariable == rhs.indexVariable) and
(self.constant == rhs.constant) and
(self.strConst == rhs.strConst) and
(self.listConst == rhs.listConst) and
(self.tsRef == rhs.tsRef) and
(self.listRef == rhs.listRef) and
(self.objectRef == rhs.objectRef))
def GetValues(self):
return [self]
def GetTarget(self):
return None
def __repr__(self):
if self.constant is not None:
return "<program.Value const:{0}>".format(self.constant)
elif self.IsStrConst():
return "<program.Value const:\"{0}\">".format(self.strConst)
elif self.IsListConst():
return "<program.Value const:{0}>".format(self.listConst)
elif self.IsTSRef():
return "<program.Value T_Ref:{0}>".format(self.tsRef)
elif self.IsListRef():
return "<program.Value L_Ref:{0}>".format(self.listRef)
elif self.IsObjRef():
return "<program.Value O_Ref:{0}>".format(self.objectRef)
else:
return "<program.Value name:{0}>".format(self.Name())
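# Hedged examples of constructing Value objects (a sketch for illustration):
#
#   Value(constant=5)                   # the integer literal 5
#   Value(name="speed")                 # the variable "speed"
#   Value(name="data", iConstant=2)     # the slice data[2]
#   Value(name="data", iVariable="i")   # the slice data[i]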
class UAssign(object):
"""Represent an Unary Op with assignment to a variable (target)"""
def __init__(self, target=None, op=None, operand=None):
"""A unary operation on constants or variables, assigned to a variable"""
self.kind = "UAssign"
self.target = target # a value object
        self.operation = op  # a unary operation (could be UAdd for identity)
self.operand = operand # (used for binary op or unary op) if used then a Value object
def GetValues(self):
if (self.operand is None):
return []
else:
return [self.operand]
def GetTarget(self):
return self.target
def __repr__(self):
msg = "<program.UAssign {0} = ".format(self.target)
msg += "{0} {1}>".format(self.operation, self.operand)
return msg
class BAssign(object):
"""Represent a Binary Op (including logical tests) with assignment to
a variable (target)"""
def __init__(self, target=None, left=None, op=None, right=None):
"""An binary operation on constants or variables, assigned to a variable"""
self.kind = "BAssign"
self.target = target # a value object
self.left = left # a Value object
self.operation = op # binary operation
self.right = right # a Value object
def GetValues(self):
return [self.left, self.right]
def GetTarget(self):
return self.target
def __repr__(self):
msg = "<program.BAssign {0} = ".format(self.target)
msg += "{0} {1} {2}>".format(self.left, self.operation, self.right)
return msg
class Call(object):
"""Calling a function, optionally assigning the result to a variable
(if self.target is not None)."""
def __init__(self, target=None, funcName=None, args=[]):
self.kind = "Call"
self.target = target # a Value object OR CAN BE NONE!
self.funcName = funcName # a String
self.args = args # each arg is a Value object
def GetValues(self):
return self.args
def GetTarget(self):
if (self.target is None):
return None
else:
return self.target
def __repr__(self):
msg = "<program.Call "
if (self.target is not None):
msg += "{0} = ".format(self.target)
msg += "name:{0} with args:{1}>".format(self.funcName, self.args)
return msg
class Return(object):
"""Return an explicit value (an int) or nothing from the function"""
def __init__(self, returnValue=None):
self.kind = "Return"
self.returnValue = returnValue
def IsVoidReturn(self):
return self.returnValue is None
def GetValues(self):
if self.returnValue is None:
return []
else:
return [self.returnValue]
def GetTarget(self):
return None
def __repr__(self):
return "<program.Return {0}>".format(self.returnValue)
# ######## Top level objects ##############################
class Function(object):
def __init__(self, name, internalFunc = False):
self.kind = "Function"
self.name = name
self.docString = ""
self.internalFunction = internalFunc
self.globalAccess = [] # Global variable names can write too
self.localVar = {} # local variable types (including temps)
self.args = []
self.callsTo = [] # functions called from this function
self.maxSimpleTemps = 0 # Number of integer temps needed,
# they will be from 0 - (maxSimpleTemps - 1).
self.body = [] # contains objects of type 'Op', 'Call'
self.returnsValue = False # explicit return with a value
self.returnsNone = False # explicit return but with no value
def __repr__(self):
msg = "<program.Function name:{0}, doc:|{1}|, ".format(
self.name, self.docString)
msg += "args:{0}, lclVars:{1}, glbWriteVars:{2}, maxSimpleTemps:{3}, internal:{4}".format(
self.args, self.localVar, self.globalAccess, self.maxSimpleTemps, self.internalFunction)
return msg + "returnsValue:{0}, calls:{1}, body:{2}>".format(
self.returnsValue, self.callsTo, self.body)
def IsInternalFunction(self):
return self.internalFunction
class Class(object):
def __init__(self, name):
self.kind = "Class"
self.name = name
self.docString = ""
self.funcNames = []
def __repr__(self):
return "<program.Class name:{}, doc:|{}|, funcNames:{}>".format(
self.name, self.docString, self.funcNames)
class Program(object):
def __init__(self):
self.kind = "Program"
self.EdVariables = {}
self.Import = []
mainFunction = Function("__main__")
self.Function = {"__main__": mainFunction}
self.FunctionSigDict = {}
self.EventHandlers = {}
self.globalVar = {}
self.GlobalTypeDict = {}
self.Class = {}
self.indent = 0
def __repr__(self):
return "<program.Program Import:{}, Global:{}, Function:{}, Class:{}>".format(
self.Import, self.globalVar, self.Function, self.Class)
def Print(self, prefix="", *vars):
if (prefix == "" and len(vars) == 0):
print()
else:
if (prefix.startswith('\n')):
print()
prefix = prefix[1:]
indentSpaces = " " * (self.indent)
if (prefix):
print(indentSpaces, prefix, sep='', end='')
else:
print(indentSpaces, end='')
for v in vars:
print(' ', v, sep='', end='')
print()
def Dump(self, filterOutInternals=True):
"""Dump the full program"""
self.Print("Program")
self.Print("\Edison variables:", self.EdVariables)
self.Print("\nImports:", self.Import)
self.Print("\nGlobals:", self.globalVar)
self.Print("\nClasses:", self.Class)
self.Print("\nFunctions:", self.Function.keys())
self.Print("\nFunction Sigs:", self.FunctionSigDict)
self.Print("\nEvent Handlers:", self.EventHandlers)
self.Print("\nFunction Details:")
self.indent += 2
sigsPrinted = []
for i in self.Function:
if (filterOutInternals and self.Function[i].IsInternalFunction()):
continue
self.Print()
f = self.Function[i]
if (f.IsInternalFunction()):
name = "{}-internal".format(i)
else:
name = i
self.Print("", name)
self.indent += 2
self.Print("Args:", f.args)
if (i in self.FunctionSigDict):
sigsPrinted.append(i)
self.Print("Signature:", self.FunctionSigDict[i])
self.Print("Globals can write:", f.globalAccess)
self.Print("Local vars:", f.localVar)
self.Print("Max simple temps:", f.maxSimpleTemps)
self.Print("Functions called:", f.callsTo)
self.indent += 2
for l in f.body:
if (l.kind == "Marker"):
self.Print()
self.Print("", l)
self.indent -= 4
self.indent -= 2
# header = "\nExternal functions:"
# for i in self.FunctionSigDict:
# if (i not in sigsPrinted):
# if header:
# self.Print(header)
# header = None
# self.Print("External function:", i)
# self.indent += 2
# self.Print("Signature:", self.FunctionSigDict[i])
# self.indent -= 2
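# Illustrative sketch (not from the original source): building a tiny IR with
# the classes above. Raw ints/strings stand in for the Value objects the real
# front end would supply.
#
#   prog = Program()
#   main = prog.Function["__main__"]
#   main.body.append(BAssign(target="x", left=1, op="+", right=2))
#   main.body.append(Return(returnValue="x"))
#   prog.Dump()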
| gpl-2.0 | -59,243,137,877,478,696 | 33.757576 | 104 | 0.581976 | false | 4.037984 | false | false | false |
kevinkellyspacey/standalone-dell-recovery | Dell/recovery_xml.py | 1 | 5532 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# «recovery_xml» - Helper Class for parsing and using a bto.xml
#
# Copyright (C) 2010-2011, Dell Inc.
#
# Author:
# - Mario Limonciello <[email protected]>
#
# This is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this application; if not, write to the Free Software Foundation, Inc., 51
# Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##################################################################################
import xml.dom.minidom
import codecs
import os
import sys
if sys.version_info[0] >= 3:
    text_type = str
    binary_type = bytes
else:
    text_type = unicode
    binary_type = str
def utf8str(old):
if isinstance(old, text_type):
return old
else:
return text_type(binary_type(old), 'utf-8', errors='ignore')
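# Quick illustration (example inputs): utf8str decodes byte strings as UTF-8
# and returns text unchanged, e.g.
#   utf8str(b'caf\xc3\xa9') -> u'caf\xe9'   # bytes -> text
#   utf8str(u'caf\xe9') -> u'caf\xe9'       # text passes through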
class BTOxml:
def __init__(self):
self.dom = None
self.new = False
self.load_bto_xml()
def set_base(self, name, md5=''):
"""Sets the base image"""
self.replace_node_contents('base', name)
if md5:
self.dom.getElementsByTagName('base')[0].setAttribute('md5', md5)
def append_fish(self, fish_type, name, md5='', srv=''):
"""Appends a fish package"""
elements = self.dom.getElementsByTagName('fish')
new_element = self.dom.createElement(fish_type)
if md5:
new_element.setAttribute('md5', md5)
if srv:
new_element.setAttribute('srv', srv)
new_node = self.dom.createTextNode(name)
new_element.appendChild(new_node)
elements[0].appendChild(new_element)
def fetch_node_contents(self, tag):
"""Fetches all children of a tag"""
elements = self.dom.getElementsByTagName(tag)
values = text_type('')
if len(elements) > 1:
values = []
if elements:
for element in elements:
child = element.firstChild
if child:
if len(elements) > 1:
values.append(child.nodeValue.strip())
else:
values = child.nodeValue.strip()
return values
def replace_node_contents(self, tag, new):
"""Replaces a node contents (that we assume exists)"""
elements = self.dom.getElementsByTagName(tag)
if not elements:
print("Missing elements for tag")
return
if elements[0].hasChildNodes():
for node in elements[0].childNodes:
elements[0].removeChild(node)
noob = self.dom.createTextNode(utf8str(new))
elements[0].appendChild(noob)
def load_bto_xml(self, fname=None):
"""Initialize an XML file into memory"""
def create_top_level(dom):
"""Initializes a top level document"""
element = dom.createElement('bto')
dom.appendChild(element)
return element
def create_tag(dom, tag, append_to):
"""Create a subtag as necessary"""
element = dom.getElementsByTagName(tag)
if element:
element = element[0]
else:
element = dom.createElement(tag)
append_to.appendChild(element)
return element
if fname:
self.new = False
try:
if os.path.exists(fname):
with open(fname, 'rb') as f:
fname = f.read()
self.dom = xml.dom.minidom.parseString(utf8str(fname))
except xml.parsers.expat.ExpatError:
print("Damaged XML file, regenerating")
if not (fname and self.dom):
self.new = True
self.dom = xml.dom.minidom.Document()
#test for top level bto object
if self.dom.firstChild and self.dom.firstChild.localName != 'bto':
self.dom.removeChild(self.dom.firstChild)
if not self.dom.firstChild:
bto = create_top_level(self.dom)
else:
bto = self.dom.getElementsByTagName('bto')[0]
#create all our second and third level tags that are supported
for tag in ['date', 'versions', 'base', 'fid', 'fish', 'logs']:
element = create_tag(self.dom, tag, bto)
subtags = []
if tag == 'versions':
subtags = ['os', 'iso', 'generator', 'bootstrap', 'ubiquity']
elif tag == 'fid':
subtags = ['git_tag', 'deb_archive']
elif tag == 'logs':
subtags = ['syslog', 'debug']
for subtag in subtags:
create_tag(self.dom, subtag, element)
def write_xml(self, fname):
"""Writes out a BTO XML file based on the current data"""
with codecs.open(fname, 'w', 'utf-8') as wfd:
if self.new:
self.dom.writexml(wfd, "", " ", "\n", encoding='utf-8')
else:
self.dom.writexml(wfd, encoding='utf-8')
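# Illustrative usage sketch (file names and checksums are made-up examples):
#
#   bto = BTOxml() # starts from an empty skeleton
#   bto.set_base('ubuntu-12.04.iso', md5='d41d8cd9...')
#   bto.append_fish('driver', 'foo.fish', md5='abc123', srv='S01')
#   bto.replace_node_contents('syslog', 'boot log contents')
#   bto.write_xml('/tmp/bto.xml')
#
#   reloaded = BTOxml()
#   reloaded.load_bto_xml('/tmp/bto.xml')
#   base_name = reloaded.fetch_node_contents('base')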
| gpl-2.0 | -9,137,183,234,520,056,000 | 34.677419 | 82 | 0.565099 | false | 4.081181 | false | false | false |
BarrelfishOS/barrelfish | tools/harness/machines/gem5.py | 1 | 5282 | ##########################################################################
# Copyright (c) 2012-2016 ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Universitaetstr 6, CH-8092 Zurich. Attn: Systems Group.
##########################################################################
# Quirks:
# * this only runs in single-core mode, since bootarm=0 is
#   used in the menu.lst template referenced below
import os, signal, tempfile, subprocess, shutil, time
import debug, machines
from machines import ARMSimulatorBase, MachineFactory, ARMSimulatorOperations
GEM5_PATH = '/home/netos/tools/gem5/gem5-stable-1804'
# gem5 takes quite a while to come up. If we return right away,
# telnet will be opened too early and fails to connect
#
# SG, 2016-10-07: If this is too high, however, and we have an
# early-boot bug gem5 will exit before telnet connects, and we do
# not get the gem5 output at all
GEM5_START_TIMEOUT = 1 # in seconds
class Gem5MachineBase(ARMSimulatorBase):
imagename = "armv7_a15ve_gem5_image"
def __init__(self, options, operations, **kwargs):
super(Gem5MachineBase, self).__init__(options, operations, **kwargs)
def get_buildall_target(self):
return "VExpressEMM-A15"
def get_boot_timeout(self):
# we set this to 10 mins since gem5 is very slow
return 600
def get_test_timeout(self):
# give gem5 tests enough time to complete: skb initialization takes
# about 10 minutes, so set timeout to 25 minutes.
# RH, 2018-08-08 newer version of gem5 is even slower ...
# increased to 50 mins
return 50 * 60
class Gem5MachineBaseOperations(ARMSimulatorOperations):
def __init__(self, machine):
super(Gem5MachineBaseOperations, self).__init__(machine)
self.simulator_start_timeout = GEM5_START_TIMEOUT
# menu.lst template for gem5 is special
# XXX: current template does not work because gem5 coreboot NYI
self.menulst_template = "menu.lst.armv7_a15ve_gem5"
def get_tftp_dir(self):
if self.tftp_dir is None:
debug.verbose('creating temporary directory for Gem5 files')
self.tftp_dir = tempfile.mkdtemp(prefix='harness_gem5_')
debug.verbose('Gem5 install directory is %s' % self.tftp_dir)
return self.tftp_dir
def reboot(self):
self._kill_child()
cmd = self._get_cmdline()
self.telnet_port = 3456
debug.verbose('starting "%s" in gem5.py:reboot' % ' '.join(cmd))
devnull = open('/dev/null', 'w')
# remove ubuntu chroot from environment to make sure gem5 finds the
# right shared libraries
env = dict(os.environ)
if 'LD_LIBRARY_PATH' in env:
del env['LD_LIBRARY_PATH']
self.child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=devnull, env=env)
time.sleep(GEM5_START_TIMEOUT)
class Gem5MachineARM(Gem5MachineBase):
def __init__(self, options, operations, **kwargs):
super(Gem5MachineARM, self).__init__(options, operations, **kwargs)
def get_bootarch(self):
return 'armv7'
def get_platform(self):
return 'a15ve'
class Gem5MachineARMOperations(Gem5MachineBaseOperations):
def set_bootmodules(self, modules):
# write menu.lst in build directory
debug.verbose("writing menu.lst in build directory")
menulst_fullpath = os.path.join(self._machine.options.builds[0].build_dir,
"platforms", "arm", self.menulst_template)
debug.verbose("writing menu.lst in build directory: %s" %
menulst_fullpath)
self._machine._write_menu_lst(modules.get_menu_data("/"), menulst_fullpath)
debug.verbose("building proper gem5 image")
debug.checkcmd(["make", self._machine.imagename],
cwd=self._machine.options.builds[0].build_dir)
# SK: did not test this yet, but should work
# @machines.add_machine
# class Gem5MachineARMSingleCore(Gem5MachineARM):
# name = 'gem5_arm_1'
# def get_ncores(self):
# return 1
# def _get_cmdline(self):
# script_path = os.path.join(self.options.sourcedir, 'tools/arm_gem5', 'gem5script.py')
# return (['gem5.fast', script_path, '--kernel=%s'%self.kernel_img, '--n=%s'%self.get_ncores()]
# + GEM5_CACHES_ENABLE)
class Gem5MachineARMSingleCore(Gem5MachineARM):
name = 'armv7_gem5'
def __init__(self, options, **kwargs):
super(Gem5MachineARMSingleCore, self).__init__(options, Gem5MachineARMSingleCoreOperations(self), **kwargs)
class Gem5MachineARMSingleCoreOperations(Gem5MachineARMOperations):
def _get_cmdline(self):
self.get_free_port()
script_path = \
os.path.join(self._machine.options.sourcedir, 'tools/arm_gem5',
'boot_gem5.sh')
return ([script_path, 'VExpress_EMM', self._machine.kernel_img, GEM5_PATH,
str(self.telnet_port)])
MachineFactory.addMachine(Gem5MachineARMSingleCore.name, Gem5MachineARMSingleCore,
bootarch="armv7",
platform="a15ve")
| mit | 70,575,520,908,050,710 | 37.554745 | 115 | 0.639152 | false | 3.475 | false | false | false |
sbc100/yapf | yapf/yapflib/format_decision_state.py | 1 | 38486 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a format decision state object that manages whitespace decisions.
Each token is processed one at a time, at which point its whitespace formatting
decisions are made. A graph of potential whitespace formattings is created,
where each node in the graph is a format decision state object. The heuristic
tries formatting the token with and without a newline before it to determine
which one has the least penalty. Therefore, the format decision state object for
each decision needs to be its own unique copy.
Once the heuristic determines the best formatting, it makes a non-dry run pass
through the code to commit the whitespace formatting.
FormatDecisionState: main class exported by this module.
"""
from yapf.yapflib import format_token
from yapf.yapflib import object_state
from yapf.yapflib import split_penalty
from yapf.yapflib import style
from yapf.yapflib import unwrapped_line
class FormatDecisionState(object):
"""The current state when indenting an unwrapped line.
The FormatDecisionState object is meant to be copied instead of referenced.
Attributes:
first_indent: The indent of the first token.
column: The number of used columns in the current line.
next_token: The next token to be formatted.
paren_level: The level of nesting inside (), [], and {}.
lowest_level_on_line: The lowest paren_level on the current line.
newline: Indicates if a newline is added along the edge to this format
decision state node.
previous: The previous format decision state in the decision tree.
stack: A stack (of _ParenState) keeping track of properties applying to
parenthesis levels.
comp_stack: A stack (of ComprehensionState) keeping track of properties
applying to comprehensions.
ignore_stack_for_comparison: Ignore the stack of _ParenState for state
comparison.
"""
def __init__(self, line, first_indent):
"""Initializer.
Initializes to the state after placing the first token from 'line' at
'first_indent'.
Arguments:
line: (UnwrappedLine) The unwrapped line we're currently processing.
first_indent: (int) The indent of the first token.
"""
self.next_token = line.first
self.column = first_indent
self.line = line
self.paren_level = 0
self.lowest_level_on_line = 0
self.ignore_stack_for_comparison = False
self.stack = [_ParenState(first_indent, first_indent)]
self.comp_stack = []
self.first_indent = first_indent
self.newline = False
self.previous = None
self.column_limit = style.Get('COLUMN_LIMIT')
def Clone(self):
"""Clones a FormatDecisionState object."""
new = FormatDecisionState(self.line, self.first_indent)
new.next_token = self.next_token
new.column = self.column
new.line = self.line
new.paren_level = self.paren_level
new.line.depth = self.line.depth
new.lowest_level_on_line = self.lowest_level_on_line
new.ignore_stack_for_comparison = self.ignore_stack_for_comparison
new.first_indent = self.first_indent
new.newline = self.newline
new.previous = self.previous
new.stack = [state.Clone() for state in self.stack]
new.comp_stack = [state.Clone() for state in self.comp_stack]
return new
def __eq__(self, other):
# Note: 'first_indent' is implicit in the stack. Also, we ignore 'previous',
# because it shouldn't have a bearing on this comparison. (I.e., it will
# report equal if 'next_token' does.)
return (self.next_token == other.next_token and
self.column == other.column and
self.paren_level == other.paren_level and
self.line.depth == other.line.depth and
self.lowest_level_on_line == other.lowest_level_on_line and
(self.ignore_stack_for_comparison or
other.ignore_stack_for_comparison or
self.stack == other.stack and self.comp_stack == other.comp_stack))
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.next_token, self.column, self.paren_level,
self.line.depth, self.lowest_level_on_line))
def __repr__(self):
return ('column::%d, next_token::%s, paren_level::%d, stack::[\n\t%s' %
(self.column, repr(self.next_token), self.paren_level,
'\n\t'.join(repr(s) for s in self.stack) + ']'))
def CanSplit(self, must_split):
"""Determine if we can split before the next token.
Arguments:
must_split: (bool) A newline was required before this token.
Returns:
True if the line can be split before the next token.
"""
current = self.next_token
previous = current.previous_token
if current.is_pseudo_paren:
return False
if (not must_split and
format_token.Subtype.DICTIONARY_KEY_PART in current.subtypes and
format_token.Subtype.DICTIONARY_KEY not in current.subtypes and
not style.Get('ALLOW_MULTILINE_DICTIONARY_KEYS')):
# In some situations, a dictionary may be multiline, but pylint doesn't
# like it. So don't allow it unless forced to.
return False
if (not must_split and
format_token.Subtype.DICTIONARY_VALUE in current.subtypes and
not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE')):
return False
if previous and previous.value == '(' and current.value == ')':
# Don't split an empty function call list if we aren't splitting before
# dict values.
token = previous.previous_token
while token:
prev = token.previous_token
if not prev or prev.name not in {'NAME', 'DOT'}:
break
token = token.previous_token
if token and format_token.Subtype.DICTIONARY_VALUE in token.subtypes:
if not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE'):
return False
if previous and previous.value == '.' and current.value == '.':
return False
return current.can_break_before
def MustSplit(self):
"""Returns True if the line must split before the next token."""
current = self.next_token
previous = current.previous_token
if current.is_pseudo_paren:
return False
if current.must_break_before:
return True
if not previous:
return False
if style.Get('SPLIT_ALL_COMMA_SEPARATED_VALUES') and previous.value == ',':
return True
if (self.stack[-1].split_before_closing_bracket and
current.value in '}]' and style.Get('SPLIT_BEFORE_CLOSING_BRACKET')):
# Split before the closing bracket if we can.
return current.node_split_penalty != split_penalty.UNBREAKABLE
if (current.value == ')' and previous.value == ',' and
not _IsSingleElementTuple(current.matching_bracket)):
return True
# Prevent splitting before the first argument in compound statements
# with the exception of function declarations.
if (style.Get('SPLIT_BEFORE_FIRST_ARGUMENT') and
_IsCompoundStatement(self.line.first) and
not _IsFunctionDef(self.line.first)):
return False
###########################################################################
# List Splitting
if (style.Get('DEDENT_CLOSING_BRACKETS') or
style.Get('SPLIT_BEFORE_FIRST_ARGUMENT')):
bracket = current if current.ClosesScope() else previous
if format_token.Subtype.SUBSCRIPT_BRACKET not in bracket.subtypes:
if bracket.OpensScope():
if style.Get('COALESCE_BRACKETS'):
if current.OpensScope():
# Prefer to keep all opening brackets together.
return False
if (not _IsLastScopeInLine(bracket) or
unwrapped_line.IsSurroundedByBrackets(bracket)):
last_token = bracket.matching_bracket
else:
last_token = _LastTokenInLine(bracket.matching_bracket)
if not self._FitsOnLine(bracket, last_token):
# Split before the first element if the whole list can't fit on a
# single line.
self.stack[-1].split_before_closing_bracket = True
return True
elif style.Get('DEDENT_CLOSING_BRACKETS') and current.ClosesScope():
# Split before and dedent the closing bracket.
return self.stack[-1].split_before_closing_bracket
if (style.Get('SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN') and
current.is_name):
# An expression that's surrounded by parens gets split after the opening
# parenthesis.
def SurroundedByParens(token):
"""Check if it's an expression surrounded by parentheses."""
while token:
if token.value == ',':
return False
if token.value == ')':
return not token.next_token
if token.OpensScope():
token = token.matching_bracket.next_token
else:
token = token.next_token
return False
if (previous.value == '(' and not previous.is_pseudo_paren and
not unwrapped_line.IsSurroundedByBrackets(previous)):
pptoken = previous.previous_token
if (pptoken and not pptoken.is_name and not pptoken.is_keyword and
SurroundedByParens(current)):
return True
if (current.is_name or current.is_string) and previous.value == ',':
# If the list has function calls in it and the full list itself cannot
# fit on the line, then we want to split. Otherwise, we'll get something
# like this:
#
# X = [
# Bar(xxx='some string',
# yyy='another long string',
# zzz='a third long string'), Bar(
# xxx='some string',
# yyy='another long string',
# zzz='a third long string')
# ]
#
# or when a string formatting syntax.
func_call_or_string_format = False
tok = current.next_token
if current.is_name:
while tok and (tok.is_name or tok.value == '.'):
tok = tok.next_token
func_call_or_string_format = tok and tok.value == '('
elif current.is_string:
while tok and tok.is_string:
tok = tok.next_token
func_call_or_string_format = tok and tok.value == '%'
if func_call_or_string_format:
open_bracket = unwrapped_line.IsSurroundedByBrackets(current)
if open_bracket:
if open_bracket.value in '[{':
if not self._FitsOnLine(open_bracket,
open_bracket.matching_bracket):
return True
elif tok.value == '(':
if not self._FitsOnLine(current, tok.matching_bracket):
return True
###########################################################################
# Dict/Set Splitting
if (style.Get('EACH_DICT_ENTRY_ON_SEPARATE_LINE') and
format_token.Subtype.DICTIONARY_KEY in current.subtypes and
not current.is_comment):
# Place each dictionary entry onto its own line.
if previous.value == '{' and previous.previous_token:
opening = _GetOpeningBracket(previous.previous_token)
if (opening and opening.value == '(' and opening.previous_token and
opening.previous_token.is_name):
# This is a dictionary that's an argument to a function.
if (self._FitsOnLine(previous, previous.matching_bracket) and
previous.matching_bracket.next_token and
(not opening.matching_bracket.next_token or
opening.matching_bracket.next_token.value != '.') and
_ScopeHasNoCommas(previous)):
# Don't split before the key if:
# - The dictionary fits on a line, and
# - The function call isn't part of a builder-style call and
# - The dictionary has one entry and no trailing comma
return False
return True
if (style.Get('SPLIT_BEFORE_DICT_SET_GENERATOR') and
format_token.Subtype.DICT_SET_GENERATOR in current.subtypes):
# Split before a dict/set generator.
return True
if (format_token.Subtype.DICTIONARY_VALUE in current.subtypes or
(previous.is_pseudo_paren and previous.value == '(' and
not current.is_comment)):
# Split before the dictionary value if we can't fit every dictionary
# entry on its own line.
if not current.OpensScope():
opening = _GetOpeningBracket(current)
if not self._EachDictEntryFitsOnOneLine(opening):
return style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE')
if previous.value == '{':
# Split if the dict/set cannot fit on one line and ends in a comma.
closing = previous.matching_bracket
if (not self._FitsOnLine(previous, closing) and
closing.previous_token.value == ','):
self.stack[-1].split_before_closing_bracket = True
return True
###########################################################################
# Argument List Splitting
if (style.Get('SPLIT_BEFORE_NAMED_ASSIGNS') and not current.is_comment and
format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in
current.subtypes):
if (previous.value not in {'=', ':', '*', '**'} and
current.value not in ':=,)' and not _IsFunctionDefinition(previous)):
# If we're going to split the lines because of named arguments, then we
# want to split after the opening bracket as well. But not when this is
# part of a function definition.
if previous.value == '(':
# Make sure we don't split after the opening bracket if the
# continuation indent is greater than the opening bracket:
#
# a(
# b=1,
# c=2)
if (self._FitsOnLine(previous, previous.matching_bracket) and
unwrapped_line.IsSurroundedByBrackets(previous)):
# An argument to a function is a function call with named
# assigns.
return False
column = self.column - self.stack[-1].last_space
return column > style.Get('CONTINUATION_INDENT_WIDTH')
opening = _GetOpeningBracket(current)
if opening:
arglist_length = (
opening.matching_bracket.total_length - opening.total_length +
self.stack[-1].indent)
return arglist_length > self.column_limit
if (current.value not in '{)' and previous.value == '(' and
self._ArgumentListHasDictionaryEntry(current)):
return True
if style.Get('SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED'):
# Split before arguments in a function call or definition if the
# arguments are terminated by a comma.
opening = _GetOpeningBracket(current)
if opening and opening.previous_token and opening.previous_token.is_name:
if previous.value in '(,':
if opening.matching_bracket.previous_token.value == ',':
return True
if ((current.is_name or current.value in {'*', '**'}) and
previous.value == ','):
# If we have a function call within an argument list and it won't fit on
# the remaining line, but it will fit on a line by itself, then go ahead
# and split before the call.
opening = _GetOpeningBracket(current)
if (opening and opening.value == '(' and opening.previous_token and
(opening.previous_token.is_name or
opening.previous_token.value in {'*', '**'})):
is_func_call = False
opening = current
while opening:
if opening.value == '(':
is_func_call = True
break
if (not (opening.is_name or opening.value in {'*', '**'}) and
opening.value != '.'):
break
opening = opening.next_token
if is_func_call:
if (not self._FitsOnLine(current, opening.matching_bracket) or
(opening.matching_bracket.next_token and
opening.matching_bracket.next_token.value != ',' and
not opening.matching_bracket.next_token.ClosesScope())):
return True
pprevious = previous.previous_token
if (current.is_name and pprevious and pprevious.is_name and
previous.value == '('):
if (not self._FitsOnLine(previous, previous.matching_bracket) and
_IsFunctionCallWithArguments(current)):
# There is a function call, with more than 1 argument, where the first
# argument is itself a function call with arguments. In this specific
# case, if we split after the first argument's opening '(', then the
# formatting will look bad for the rest of the arguments. E.g.:
#
# outer_function_call(inner_function_call(
# inner_arg1, inner_arg2),
# outer_arg1, outer_arg2)
#
# Instead, enforce a split before that argument to keep things looking
# good.
return True
if (previous.OpensScope() and not current.OpensScope() and
not current.is_comment and
format_token.Subtype.SUBSCRIPT_BRACKET not in previous.subtypes):
if pprevious and not pprevious.is_keyword and not pprevious.is_name:
# We want to split if there's a comment in the container.
token = current
while token != previous.matching_bracket:
if token.is_comment:
return True
token = token.next_token
if previous.value == '(':
pptoken = previous.previous_token
if not pptoken or not pptoken.is_name:
# Split after the opening of a tuple if it doesn't fit on the current
# line and it's not a function call.
if self._FitsOnLine(previous, previous.matching_bracket):
return False
elif not self._FitsOnLine(previous, previous.matching_bracket):
if len(previous.container_elements) == 1:
return False
elements = previous.container_elements + [previous.matching_bracket]
i = 1
while i < len(elements):
if (not elements[i - 1].OpensScope() and
not self._FitsOnLine(elements[i - 1], elements[i])):
return True
i += 1
if (self.column_limit - self.column) / float(self.column_limit) < 0.3:
# Try not to squish all of the arguments off to the right.
return True
else:
# Split after the opening of a container if it doesn't fit on the
# current line.
if not self._FitsOnLine(previous, previous.matching_bracket):
return True
###########################################################################
# Original Formatting Splitting
# These checks rely upon the original formatting. This is in order to
# attempt to keep hand-written code in the same condition as it was before.
# However, this may cause the formatter to fail to be idempotent.
if (style.Get('SPLIT_BEFORE_BITWISE_OPERATOR') and current.value in '&|' and
previous.lineno < current.lineno):
# Retain the split before a bitwise operator.
return True
if (current.is_comment and
previous.lineno < current.lineno - current.value.count('\n')):
# If a comment comes in the middle of an unwrapped line (like an if
# conditional with comments interspersed), then we want to split if the
# original comments were on a separate line.
return True
return False
def AddTokenToState(self, newline, dry_run, must_split=False):
"""Add a token to the format decision state.
Allow the heuristic to try out adding the token with and without a newline.
Later on, the algorithm will determine which one has the lowest penalty.
Arguments:
newline: (bool) Add the token on a new line if True.
dry_run: (bool) Don't commit whitespace changes to the FormatToken if
True.
must_split: (bool) A newline was required before this token.
Returns:
The penalty of splitting after the current token.
"""
penalty = 0
if newline:
penalty = self._AddTokenOnNewline(dry_run, must_split)
else:
self._AddTokenOnCurrentLine(dry_run)
penalty += self._CalculateComprehensionState(newline)
return self.MoveStateToNextToken() + penalty
def _AddTokenOnCurrentLine(self, dry_run):
"""Puts the token on the current line.
Appends the next token to the state and updates information necessary for
indentation.
Arguments:
dry_run: (bool) Commit whitespace changes to the FormatToken if True.
"""
current = self.next_token
previous = current.previous_token
spaces = current.spaces_required_before
if not dry_run:
current.AddWhitespacePrefix(newlines_before=0, spaces=spaces)
if previous.OpensScope():
if not current.is_comment:
# Align closing scopes that are on a newline with the opening scope:
#
# foo = [a,
# b,
# ]
self.stack[-1].closing_scope_indent = self.column - 1
if style.Get('ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'):
self.stack[-1].closing_scope_indent += 1
self.stack[-1].indent = self.column + spaces
else:
self.stack[-1].closing_scope_indent = (
self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH'))
self.column += spaces
def _AddTokenOnNewline(self, dry_run, must_split):
"""Adds a line break and necessary indentation.
Appends the next token to the state and updates information necessary for
indentation.
Arguments:
dry_run: (bool) Don't commit whitespace changes to the FormatToken if
True.
must_split: (bool) A newline was required before this token.
Returns:
The split penalty for splitting after the current state.
"""
current = self.next_token
previous = current.previous_token
self.column = self._GetNewlineColumn()
if not dry_run:
indent_level = self.line.depth
spaces = self.column
if spaces:
spaces -= indent_level * style.Get('INDENT_WIDTH')
current.AddWhitespacePrefix(
newlines_before=1, spaces=spaces, indent_level=indent_level)
if not current.is_comment:
self.stack[-1].last_space = self.column
self.lowest_level_on_line = self.paren_level
if (previous.OpensScope() or
(previous.is_comment and previous.previous_token is not None and
previous.previous_token.OpensScope())):
self.stack[-1].closing_scope_indent = max(
0, self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH'))
self.stack[-1].split_before_closing_bracket = True
# Calculate the split penalty.
penalty = current.split_penalty
if must_split:
# Don't penalize for a must split.
return penalty
if previous.is_pseudo_paren and previous.value == '(':
# Small penalty for splitting after a pseudo paren.
penalty += 50
# Add a penalty for each increasing newline we add, but don't penalize for
# splitting before an if-expression or list comprehension.
if current.value not in {'if', 'for'}:
last = self.stack[-1]
last.num_line_splits += 1
penalty += (
style.Get('SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT') *
last.num_line_splits)
if current.OpensScope() and previous.OpensScope():
# Prefer to keep opening brackets coalesced (unless it's at the beginning
# of a function call).
pprev = previous.previous_token
if not pprev or not pprev.is_name:
penalty += 10
return penalty + 10
def MoveStateToNextToken(self):
"""Calculate format decision state information and move onto the next token.
Before moving onto the next token, we first calculate the format decision
state given the current token and its formatting decisions. Then the format
decision state is set up so that the next token can be added.
Returns:
The penalty for the number of characters over the column limit.
"""
current = self.next_token
if not current.OpensScope() and not current.ClosesScope():
self.lowest_level_on_line = min(self.lowest_level_on_line,
self.paren_level)
# If we encounter an opening bracket, we add a level to our stack to prepare
# for the subsequent tokens.
if current.OpensScope():
last = self.stack[-1]
new_indent = style.Get('CONTINUATION_INDENT_WIDTH') + last.last_space
self.stack.append(_ParenState(new_indent, self.stack[-1].last_space))
self.paren_level += 1
# If we encounter a closing bracket, we can remove a level from our
# parenthesis stack.
if len(self.stack) > 1 and current.ClosesScope():
if format_token.Subtype.DICTIONARY_KEY_PART in current.subtypes:
self.stack[-2].last_space = self.stack[-2].indent
else:
self.stack[-2].last_space = self.stack[-1].last_space
self.stack.pop()
self.paren_level -= 1
is_multiline_string = current.is_string and '\n' in current.value
if is_multiline_string:
# This is a multiline string. Only look at the first line.
self.column += len(current.value.split('\n')[0])
elif not current.is_pseudo_paren:
self.column += len(current.value)
self.next_token = self.next_token.next_token
# Calculate the penalty for overflowing the column limit.
penalty = 0
if (not current.is_pylint_comment and not current.is_pytype_comment and
self.column > self.column_limit):
excess_characters = self.column - self.column_limit
penalty += style.Get('SPLIT_PENALTY_EXCESS_CHARACTER') * excess_characters
if is_multiline_string:
# If this is a multiline string, the column is actually the
# end of the last line in the string.
self.column = len(current.value.split('\n')[-1])
return penalty
def _CalculateComprehensionState(self, newline):
"""Makes required changes to comprehension state.
Args:
newline: Whether the current token is to be added on a newline.
Returns:
The penalty for the token-newline combination given the current
comprehension state.
"""
current = self.next_token
previous = current.previous_token
top_of_stack = self.comp_stack[-1] if self.comp_stack else None
penalty = 0
if top_of_stack is not None:
# Check if the token terminates the current comprehension.
if current == top_of_stack.closing_bracket:
last = self.comp_stack.pop()
# Lightly penalize comprehensions that are split across multiple lines.
if last.has_interior_split:
penalty += style.Get('SPLIT_PENALTY_COMPREHENSION')
return penalty
if newline:
top_of_stack.has_interior_split = True
if (format_token.Subtype.COMP_EXPR in current.subtypes and
format_token.Subtype.COMP_EXPR not in previous.subtypes):
self.comp_stack.append(object_state.ComprehensionState(current))
return penalty
if (current.value == 'for' and
format_token.Subtype.COMP_FOR in current.subtypes):
if top_of_stack.for_token is not None:
# Treat nested comprehensions like normal comp_if expressions.
# Example:
# my_comp = [
# a.qux + b.qux
# for a in foo
# --> for b in bar <--
# if a.zut + b.zut
# ]
if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and
top_of_stack.has_split_at_for != newline and
(top_of_stack.has_split_at_for or
not top_of_stack.HasTrivialExpr())):
penalty += split_penalty.UNBREAKABLE
else:
top_of_stack.for_token = current
top_of_stack.has_split_at_for = newline
# Try to keep trivial expressions on the same line as the comp_for.
if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and newline and
top_of_stack.HasTrivialExpr()):
penalty += split_penalty.CONNECTED
if (format_token.Subtype.COMP_IF in current.subtypes and
format_token.Subtype.COMP_IF not in previous.subtypes):
# Penalize breaking at comp_if when it doesn't match the newline structure
# in the rest of the comprehension.
if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and
top_of_stack.has_split_at_for != newline and
(top_of_stack.has_split_at_for or not top_of_stack.HasTrivialExpr())):
penalty += split_penalty.UNBREAKABLE
return penalty
def _GetNewlineColumn(self):
"""Return the new column on the newline."""
current = self.next_token
previous = current.previous_token
top_of_stack = self.stack[-1]
if current.spaces_required_before > 2 or self.line.disable:
return current.spaces_required_before
if current.OpensScope():
return top_of_stack.indent if self.paren_level else self.first_indent
if current.ClosesScope():
if (previous.OpensScope() or
(previous.is_comment and previous.previous_token is not None and
previous.previous_token.OpensScope())):
return max(0,
top_of_stack.indent - style.Get('CONTINUATION_INDENT_WIDTH'))
return top_of_stack.closing_scope_indent
if (previous and previous.is_string and current.is_string and
format_token.Subtype.DICTIONARY_VALUE in current.subtypes):
return previous.column
if style.Get('INDENT_DICTIONARY_VALUE'):
if previous and (previous.value == ':' or previous.is_pseudo_paren):
if format_token.Subtype.DICTIONARY_VALUE in current.subtypes:
return top_of_stack.indent
if (_IsCompoundStatement(self.line.first) and
(not style.Get('DEDENT_CLOSING_BRACKETS') or
style.Get('SPLIT_BEFORE_FIRST_ARGUMENT'))):
token_indent = (
len(self.line.first.whitespace_prefix.split('\n')[-1]) +
style.Get('INDENT_WIDTH'))
if token_indent == top_of_stack.indent:
return top_of_stack.indent + style.Get('CONTINUATION_INDENT_WIDTH')
return top_of_stack.indent
def _FitsOnLine(self, start, end):
"""Determines if line between start and end can fit on the current line."""
length = end.total_length - start.total_length
if not start.is_pseudo_paren:
length += len(start.value)
return length + self.column <= self.column_limit
def _EachDictEntryFitsOnOneLine(self, opening):
"""Determine if each dict elems can fit on one line."""
def PreviousNonCommentToken(tok):
tok = tok.previous_token
while tok.is_comment:
tok = tok.previous_token
return tok
def ImplicitStringConcatenation(tok):
num_strings = 0
if tok.is_pseudo_paren:
tok = tok.next_token
while tok.is_string:
num_strings += 1
tok = tok.next_token
return num_strings > 1
closing = opening.matching_bracket
entry_start = opening.next_token
current = opening.next_token.next_token
while current and current != closing:
if format_token.Subtype.DICTIONARY_KEY in current.subtypes:
prev = PreviousNonCommentToken(current)
length = prev.total_length - entry_start.total_length
length += len(entry_start.value)
if length + self.stack[-2].indent >= self.column_limit:
return False
entry_start = current
if current.OpensScope():
if ((current.value == '{' or
(current.is_pseudo_paren and current.next_token.value == '{') and
format_token.Subtype.DICTIONARY_VALUE in current.subtypes) or
ImplicitStringConcatenation(current)):
# A dictionary entry that cannot fit on a single line shouldn't matter
# to this calculation. If it can't fit on a single line, then the
# opening should be on the same line as the key and the rest on
# newlines after it. But the other entries should be on single lines
# if possible.
if current.matching_bracket:
current = current.matching_bracket
while current:
if current == closing:
return True
if format_token.Subtype.DICTIONARY_KEY in current.subtypes:
entry_start = current
break
current = current.next_token
else:
current = current.matching_bracket
else:
current = current.next_token
    # At this point, current is the closing bracket. Go back one to get the
    # end of the dictionary entry.
current = PreviousNonCommentToken(current)
length = current.total_length - entry_start.total_length
length += len(entry_start.value)
return length + self.stack[-2].indent <= self.column_limit
def _ArgumentListHasDictionaryEntry(self, token):
"""Check if the function argument list has a dictionary as an arg."""
if _IsArgumentToFunction(token):
while token:
if token.value == '{':
length = token.matching_bracket.total_length - token.total_length
return length + self.stack[-2].indent > self.column_limit
if token.ClosesScope():
break
if token.OpensScope():
token = token.matching_bracket
token = token.next_token
return False
_COMPOUND_STMTS = frozenset(
{'for', 'while', 'if', 'elif', 'with', 'except', 'def', 'class'})
def _IsCompoundStatement(token):
if token.value == 'async':
token = token.next_token
return token.value in _COMPOUND_STMTS
def _IsFunctionDef(token):
if token.value == 'async':
token = token.next_token
return token.value == 'def'
def _IsFunctionCallWithArguments(token):
while token:
if token.value == '(':
token = token.next_token
return token and token.value != ')'
elif token.name not in {'NAME', 'DOT', 'EQUAL'}:
break
token = token.next_token
return False
def _IsArgumentToFunction(token):
bracket = unwrapped_line.IsSurroundedByBrackets(token)
if not bracket or bracket.value != '(':
return False
previous = bracket.previous_token
return previous and previous.is_name
def _GetLengthOfSubtype(token, subtype, exclude=None):
current = token
while (current.next_token and subtype in current.subtypes and
(exclude is None or exclude not in current.subtypes)):
current = current.next_token
return current.total_length - token.total_length + 1
def _GetOpeningBracket(current):
"""Get the opening bracket containing the current token."""
if current.matching_bracket and not current.is_pseudo_paren:
return current.matching_bracket
while current:
if current.ClosesScope():
current = current.matching_bracket
elif current.is_pseudo_paren:
current = current.previous_token
elif current.OpensScope():
return current
current = current.previous_token
return None
def _LastTokenInLine(current):
while not current.is_comment and current.next_token:
current = current.next_token
return current
def _IsFunctionDefinition(current):
prev = current.previous_token
return (current.value == '(' and prev and
format_token.Subtype.FUNC_DEF in prev.subtypes)
def _IsLastScopeInLine(current):
while current:
current = current.next_token
if current and current.OpensScope():
return False
return True
def _IsSingleElementTuple(token):
"""Check if it's a single-element tuple."""
close = token.matching_bracket
token = token.next_token
num_commas = 0
while token != close:
if token.value == ',':
num_commas += 1
if token.OpensScope():
token = token.matching_bracket
else:
token = token.next_token
return num_commas == 1
def _ScopeHasNoCommas(token):
"""Check if the scope has no commas."""
close = token.matching_bracket
token = token.next_token
while token != close:
if token.value == ',':
return False
if token.OpensScope():
token = token.matching_bracket
else:
token = token.next_token
return True
class _ParenState(object):
"""Maintains the state of the bracket enclosures.
A stack of _ParenState objects are kept so that we know how to indent relative
to the brackets.
Attributes:
indent: The column position to which a specified parenthesis level needs to
be indented.
last_space: The column position of the last space on each level.
split_before_closing_bracket: Whether a newline needs to be inserted before
the closing bracket. We only want to insert a newline before the closing
bracket if there also was a newline after the beginning left bracket.
num_line_splits: Number of line splits this _ParenState contains already.
Each subsequent line split gets an increasing penalty.
"""
# TODO(morbo): This doesn't track "bin packing."
def __init__(self, indent, last_space):
self.indent = indent
self.last_space = last_space
self.closing_scope_indent = 0
self.split_before_closing_bracket = False
self.num_line_splits = 0
def Clone(self):
state = _ParenState(self.indent, self.last_space)
state.closing_scope_indent = self.closing_scope_indent
state.split_before_closing_bracket = self.split_before_closing_bracket
state.num_line_splits = self.num_line_splits
return state
def __repr__(self):
return '[indent::%d, last_space::%d, closing_scope_indent::%d]' % (
self.indent, self.last_space, self.closing_scope_indent)
def __eq__(self, other):
return hash(self) == hash(other)
def __ne__(self, other):
return not self == other
def __hash__(self, *args, **kwargs):
return hash((self.indent, self.last_space, self.closing_scope_indent,
self.split_before_closing_bracket, self.num_line_splits))
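# Illustrative sketch (not part of yapf): how a solver might drive
# FormatDecisionState. 'line' (an UnwrappedLine) and 'indent' are assumed
# inputs; the real driver lives in yapf's solver/reformatter code.
#
#   state = FormatDecisionState(line, first_indent=indent)
#   while state.next_token:
#       must = state.MustSplit()
#       split_state, nosplit_state = state.Clone(), state.Clone()
#       split_cost = split_state.AddTokenToState(
#           newline=True, dry_run=True, must_split=must)
#       nosplit_cost = nosplit_state.AddTokenToState(
#           newline=False, dry_run=True, must_split=must)
#       state = split_state if (must or split_cost < nosplit_cost) \
#           else nosplit_state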
| apache-2.0 | 1,545,245,249,763,236,600 | 37.06726 | 80 | 0.644364 | false | 4.030792 | false | false | false |
demisto/content | Packs/ThinkstCanary/Integrations/ThinkstCanary/ThinkstCanary_test.py | 1 | 3171 | import demistomock as demisto
MOCK_PARAMS = {
'access-key': 'fake_access_key',
'secret-key': 'fake_access_key',
'server': 'http://123-fake-api.com/',
'unsecure': True,
'proxy': True
}
def test_fetch_incidents(mocker, requests_mock):
"""
Given: An existing last run time.
When: Running a fetch incidents command normally (not a first run).
Then: The last run time object should increment by 1 second.
    2020-07-01-04:58:18 -> 2020-07-01-04:58:19
"""
mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
mocker.patch.object(demisto, 'getLastRun', return_value={'time': '2020-07-01-04:58:18'})
mocker.patch.object(demisto, 'setLastRun')
requests_mock.get('http://123-fake-api.com/api/v1/incidents/unacknowledged?newer_than=2020-07-01-04%3A58%3A18',
json={'incidents': [{'description': {'created': 1593579498}}]})
from ThinkstCanary import fetch_incidents_command
fetch_incidents_command()
assert demisto.setLastRun.call_args[0][0]['time'] == '2020-07-01-04:58:19'
def test_check_whitelist_command_not_whitelisted(mocker):
"""
Given: An IP to check
When: Running check_whitelist_command.
Then: The IP should not be ignored (not in the whitelist).
"""
import ThinkstCanary
ip_to_check = "1.2.3.4"
mocker.patch.object(demisto, 'results')
mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
mocker.patch.object(demisto, 'args', return_value={'ip': ip_to_check})
mocker.patch.object(ThinkstCanary, 'check_whitelist', return_value={'is_ip_ignored': False,
'is_whitelist_enabled': True})
ThinkstCanary.check_whitelist_command()
assert demisto.results.call_args_list[0][0][0].get('HumanReadable') == 'The IP address 1.2.3.4:Any is not ' \
'Whitelisted'
def test_check_whitelist_commands_whitelisted(mocker):
"""
Given: An already whitelisted IP to check
When: Inserting IP to whitelist (whitelist_ip_command) and checking if it is whitelisted (check_whitelist_command).
Then: The IP should be ignored (in the whitelist), and an appropriate message to the user should be prompted.
"""
import ThinkstCanary
ip_to_whitelist = "1.2.3.4"
mocker.patch.object(demisto, 'results')
mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
mocker.patch.object(demisto, 'args', return_value={'ip': ip_to_whitelist})
mocker.patch.object(ThinkstCanary, 'whitelist_ip', return_value={'message': 'Whitelist added',
'result': 'success'})
mocker.patch.object(ThinkstCanary, 'check_whitelist', return_value={'is_ip_ignored': True,
'is_whitelist_enabled': True})
ThinkstCanary.whitelist_ip_command()
ThinkstCanary.check_whitelist_command()
assert demisto.results.call_args_list[1][0][0].get('HumanReadable') == 'The IP address 1.2.3.4:Any is Whitelisted'
| mit | -3,411,973,804,248,822,300 | 47.784615 | 120 | 0.62567 | false | 3.503867 | false | false | false |
rajarsheem/libsdae-autoencoder-tensorflow | deepautoencoder/stacked_autoencoder.py | 1 | 6154 | import numpy as np
import deepautoencoder.utils as utils
import tensorflow as tf
allowed_activations = ['sigmoid', 'tanh', 'softmax', 'relu', 'linear']
allowed_noises = [None, 'gaussian', 'mask']
allowed_losses = ['rmse', 'cross-entropy']
class StackedAutoEncoder:
"""A deep autoencoder with denoising capability"""
def assertions(self):
global allowed_activations, allowed_noises, allowed_losses
assert self.loss in allowed_losses, 'Incorrect loss given'
assert 'list' in str(
type(self.dims)), 'dims must be a list even if there is one layer.'
assert len(self.epoch) == len(
self.dims), "No. of epochs must equal to no. of hidden layers"
assert len(self.activations) == len(
self.dims), "No. of activations must equal to no. of hidden layers"
assert all(
True if x > 0 else False
            for x in self.epoch), "No. of epochs must be at least 1"
assert set(self.activations + allowed_activations) == set(
allowed_activations), "Incorrect activation given."
assert utils.noise_validator(
self.noise, allowed_noises), "Incorrect noise given"
def __init__(self, dims, activations, epoch=1000, noise=None, loss='rmse',
lr=0.001, batch_size=100, print_step=50):
self.print_step = print_step
self.batch_size = batch_size
self.lr = lr
self.loss = loss
self.activations = activations
self.noise = noise
self.epoch = epoch
self.dims = dims
self.assertions()
self.depth = len(dims)
self.weights, self.biases = [], []
def add_noise(self, x):
if self.noise == 'gaussian':
n = np.random.normal(0, 0.1, (len(x), len(x[0])))
return x + n
if 'mask' in self.noise:
frac = float(self.noise.split('-')[1])
temp = np.copy(x)
for i in temp:
n = np.random.choice(len(i), round(
frac * len(i)), replace=False)
i[n] = 0
return temp
        if self.noise == 'sp':
            # salt-and-pepper noise is not implemented yet
            pass
def fit(self, x):
for i in range(self.depth):
print('Layer {0}'.format(i + 1))
if self.noise is None:
x = self.run(data_x=x, activation=self.activations[i],
data_x_=x,
hidden_dim=self.dims[i], epoch=self.epoch[
i], loss=self.loss,
batch_size=self.batch_size, lr=self.lr,
print_step=self.print_step)
else:
temp = np.copy(x)
x = self.run(data_x=self.add_noise(temp),
activation=self.activations[i], data_x_=x,
hidden_dim=self.dims[i],
epoch=self.epoch[
i], loss=self.loss,
batch_size=self.batch_size,
lr=self.lr, print_step=self.print_step)
def transform(self, data):
tf.reset_default_graph()
sess = tf.Session()
x = tf.constant(data, dtype=tf.float32)
for w, b, a in zip(self.weights, self.biases, self.activations):
weight = tf.constant(w, dtype=tf.float32)
bias = tf.constant(b, dtype=tf.float32)
layer = tf.matmul(x, weight) + bias
x = self.activate(layer, a)
return x.eval(session=sess)
def fit_transform(self, x):
self.fit(x)
return self.transform(x)
def run(self, data_x, data_x_, hidden_dim, activation, loss, lr,
print_step, epoch, batch_size=100):
tf.reset_default_graph()
input_dim = len(data_x[0])
sess = tf.Session()
x = tf.placeholder(dtype=tf.float32, shape=[None, input_dim], name='x')
x_ = tf.placeholder(dtype=tf.float32, shape=[
None, input_dim], name='x_')
encode = {'weights': tf.Variable(tf.truncated_normal(
[input_dim, hidden_dim], dtype=tf.float32)),
'biases': tf.Variable(tf.truncated_normal([hidden_dim],
dtype=tf.float32))}
decode = {'biases': tf.Variable(tf.truncated_normal([input_dim],
dtype=tf.float32)),
'weights': tf.transpose(encode['weights'])}
encoded = self.activate(
tf.matmul(x, encode['weights']) + encode['biases'], activation)
decoded = tf.matmul(encoded, decode['weights']) + decode['biases']
# reconstruction loss
if loss == 'rmse':
loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(x_, decoded))))
elif loss == 'cross-entropy':
loss = -tf.reduce_mean(x_ * tf.log(decoded))
train_op = tf.train.AdamOptimizer(lr).minimize(loss)
sess.run(tf.global_variables_initializer())
for i in range(epoch):
b_x, b_x_ = utils.get_batch(
data_x, data_x_, batch_size)
sess.run(train_op, feed_dict={x: b_x, x_: b_x_})
if (i + 1) % print_step == 0:
l = sess.run(loss, feed_dict={x: data_x, x_: data_x_})
print('epoch {0}: global loss = {1}'.format(i, l))
# self.loss_val = l
# debug
# print('Decoded', sess.run(decoded, feed_dict={x: self.data_x_})[0])
self.weights.append(sess.run(encode['weights']))
self.biases.append(sess.run(encode['biases']))
return sess.run(encoded, feed_dict={x: data_x_})
def activate(self, linear, name):
if name == 'sigmoid':
return tf.nn.sigmoid(linear, name='encoded')
elif name == 'softmax':
return tf.nn.softmax(linear, name='encoded')
elif name == 'linear':
return linear
elif name == 'tanh':
return tf.nn.tanh(linear, name='encoded')
elif name == 'relu':
return tf.nn.relu(linear, name='encoded')
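# --- Usage sketch (illustrative; `train_x` is an assumed 2-D numpy array) ---
#
#   model = StackedAutoEncoder(
#       dims=[200, 100], activations=['relu', 'relu'],
#       epoch=[1000, 1000], noise='mask-0.4', loss='rmse',
#       lr=0.001, batch_size=100, print_step=200)
#   encoded = model.fit_transform(train_x) # layer-wise training, then encode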
| mit | -4,848,992,055,917,937,000 | 41.441379 | 79 | 0.524862 | false | 3.834268 | false | false | false |
aamirmajeedkhan/P4-conference-central | conference.py | 1 | 35026 | #!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = '[email protected] (Wesley Chun)'
from datetime import datetime,time
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.ext import ndb
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import TeeShirtSize
from models import SessionType,Speaker,Session,SessionForm,SessionForms
from models import SessionQueryForm,SessionQueryForms
from utils import getUserId
from settings import WEB_CLIENT_ID
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
from models import Conference
from models import ConferenceForm
DEFAULTS = {
"city": "Default City",
"maxAttendees": 0,
"seatsAvailable": 0,
"topics": [ "Default", "Topic" ],
}
OPERATORS = {
'EQ': '=',
'GT': '>',
'GTEQ': '>=',
'LT': '<',
'LTEQ': '<=',
'NE': '!='
}
CONF_FIELDS = {
'CITY': 'city',
'TOPIC': 'topics',
'MONTH': 'month',
'MAX_ATTENDEES': 'maxAttendees',
}
SESSION_FIELDS = {
'NAME': 'name',
'DURATION': 'duration',
'TYPE_OF_SESSION': 'typeOfSession',
    'DATE': 'date',
'START_TIME':'startTime',
'SPEAKER':'speaker',
}
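# Illustrative filter shape (assumed client payload): the query endpoints
# receive filters like {"field": "CITY", "operator": "EQ", "value": "London"};
# field and operator names are translated through the dicts above before the
# ndb query is built.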
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
from models import BooleanMessage
from models import ConflictException
CONF_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
)
CONF_POST_REQUEST = endpoints.ResourceContainer(
ConferenceForm,
websafeConferenceKey=messages.StringField(1),
)
SESSION_SPEAKER_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
speaker=messages.StringField(1, required=True),
)
SESSION_POST_REQUEST = endpoints.ResourceContainer(
SessionForm,
websafeConferenceKey=messages.StringField(1, required=True)
)
SESSION_TYPE_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1, required=True),
sessionType=messages.StringField(2, required=True)
)
SESSION_WISHLIST_POST_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeSessionKey=messages.StringField(1, required=True)
)
SESSION_REQUIRED_FIELDS = ('name', 'speaker', 'duration', 'typeOfSession', 'date', 'startTime')
MEMCACHE_ANNOUNCEMENTS_KEY="LATEST_ANNOUNCEMENT"
MEMCACHE_FEATURED_SPEAKER_KEY="FEATURED_SPEAKER"
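# Illustrative use (handlers elsewhere, e.g. cron/task endpoints, would do
# roughly this with the keys above):
#   memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
#   announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY)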
from google.appengine.api import memcache
from models import StringMessage
from google.appengine.api import taskqueue
@endpoints.api( name='conference',
version='v1',
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
# copy relevant fields from Profile to ProfileForm
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if non-existent."""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# retrieve profile from datastore
user_id=getUserId(user)
p_key = ndb.Key(Profile,user_id)
profile = p_key.get()
# create profile if not exist
if not profile:
profile = Profile(
                key = p_key,
displayName = user.nickname(),
mainEmail= user.email(),
teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
# if saveProfile(), process user-modifyable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
#if field == 'teeShirtSize':
# setattr(prof, field, str(val).upper())
#else:
# setattr(prof, field, val)
prof.put()
return self._copyProfileToForm(prof)
@endpoints.method(message_types.VoidMessage, ProfileForm,
path='profile', http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return self._doProfile()
@endpoints.method(ProfileMiniForm, ProfileForm,
path='profile', http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Update & return user profile."""
return self._doProfile(request)
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
def _createConferenceObject(self, request):
"""Create or update Conference object, returning ConferenceForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException("Conference 'name' field required")
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data['websafeKey']
del data['organizerDisplayName']
# add default values for those missing (both data model & outbound Message)
for df in DEFAULTS:
if data[df] in (None, []):
data[df] = DEFAULTS[df]
setattr(request, df, DEFAULTS[df])
# convert dates from strings to Date objects; set month based on start_date
if data['startDate']:
data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
data['month'] = data['startDate'].month
else:
data['month'] = 0
if data['endDate']:
data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()
# set seatsAvailable to be same as maxAttendees on creation
# both for data model & outbound Message
if data["maxAttendees"] > 0:
data["seatsAvailable"] = data["maxAttendees"]
setattr(request, "seatsAvailable", data["maxAttendees"])
# make Profile Key from user ID
p_key = ndb.Key(Profile, user_id)
# allocate new Conference ID with Profile key as parent
c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
# make Conference key from ID
c_key = ndb.Key(Conference, c_id, parent=p_key)
data['key'] = c_key
data['organizerUserId'] = request.organizerUserId = user_id
# create Conference & return (modified) ConferenceForm
Conference(**data).put()
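        # queue a task to send a confirmation email asynchronously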
taskqueue.add(params={'email': user.email(),
'conferenceInfo': repr(request)},
url='/tasks/send_confirmation_email'
)
return request
@ndb.transactional()
def _updateConferenceObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
        # fetch the existing conference to update
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Not getting all the fields, so don't create a new object; just
# copy relevant fields from ConferenceForm to Conference object
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name in ('startDate', 'endDate'):
data = datetime.strptime(data, "%Y-%m-%d").date()
if field.name == 'startDate':
conf.month = data.month
# write to Conference object
setattr(conf, field.name, data)
conf.put()
prof = ndb.Key(Profile, user_id).get()
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(CONF_POST_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='PUT', name='updateConference')
def updateConference(self, request):
"""Update conference w/provided fields & return w/updated info."""
return self._updateConferenceObject(request)
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='GET', name='getConference')
def getConference(self, request):
"""Return requested conference (by websafeConferenceKey)."""
# get Conference object from request; bail if not found
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
prof = conf.key.parent().get()
# return ConferenceForm
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(ConferenceForm, ConferenceForm, path='conference',
http_method='POST', name='createConference')
def createConference(self, request):
"""Create new conference."""
return self._createConferenceObject(request)
@endpoints.method(ConferenceQueryForms, ConferenceForms,
path='queryConferences',
http_method='POST',
name='queryConferences')
def queryConferences(self, request):
"""Query for conferences."""
conferences = self._getQuery(request)
# need to fetch organiser displayName from profiles
# get all keys and use get_multi for speed
organisers = [(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return individual ConferenceForm object per Conference
        return ConferenceForms(
            items=[self._copyConferenceToForm(conf, names[conf.organizerUserId])
                   for conf in conferences]
        )
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='getConferencesCreated',
http_method='POST', name='getConferencesCreated')
def getConferencesCreated(self, request):
"""Return conferences created by user."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# make profile key
p_key = ndb.Key(Profile, getUserId(user))
# create ancestor query for this user
conferences = Conference.query(ancestor=p_key)
# get the user profile and display name
prof = p_key.get()
displayName = getattr(prof, 'displayName')
# return set of ConferenceForm objects per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, displayName) for conf in conferences]
)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='filterPlayground',
http_method='GET', name='filterPlayground')
def filterPlayground(self, request):
q = Conference.query()
# simple filter usage:
# q = q.filter(Conference.city == "Paris")
# advanced filter building and usage
field = "city"
operator = "="
value = "London"
f = ndb.query.FilterNode(field, operator, value)
q = q.filter(f)
        q = q.order(Conference.maxAttendees)
        # filter for conferences with more than 6 attendee seats
        q = q.filter(Conference.maxAttendees > 6)
return ConferenceForms(
items=[self._copyConferenceToForm(conf, "") for conf in q]
)
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
        # Datastore requires the inequality-filtered property to be the
        # first sort order, so sort on it (if any) before name
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
q = q.filter(formatted_query)
return q
    def _formatFilters(self, filters, filter_type='Conference'):
        """Parse, check validity and format user supplied filters."""
        formatted_filters = []
        inequality_field = None
        for f in filters:
            filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}
            try:
                if filter_type == 'Conference':
                    filtr["field"] = CONF_FIELDS[filtr["field"]]
                elif filter_type == 'Session':
                    filtr["field"] = SESSION_FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException("Filter contains invalid field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous filters
# disallow the filter if inequality was performed on a different field before
# track the field on which the inequality operation is performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
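    # Illustrative example (assumed mappings): a submitted filter such as
    # {"field": "CITY", "operator": "EQ", "value": "London"} becomes
    # {"field": "city", "operator": "=", "value": "London"}, given that
    # CONF_FIELDS maps "CITY" -> "city" and OPERATORS maps "EQ" -> "=".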
#@ndb.transactional(xg=True)
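    # Note: re-enabling the cross-group transaction above would make the
    # paired Profile/Conference updates below atomic.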
def _conferenceRegistration(self, request, reg=True):
"""Register or unregister user for selected conference."""
retval = None
prof = self._getProfileFromUser() # get user Profile
# check if conf exists given websafeConfKey
# get conference; check that it exists
wsck = request.websafeConferenceKey
conf = ndb.Key(urlsafe=wsck).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
# register
if reg:
# check if user already registered otherwise add
if wsck in prof.conferenceKeysToAttend:
raise ConflictException(
"You have already registered for this conference")
# check if seats avail
if conf.seatsAvailable <= 0:
raise ConflictException(
"There are no seats available.")
# register user, take away one seat
prof.conferenceKeysToAttend.append(wsck)
conf.seatsAvailable -= 1
retval = True
# unregister
else:
# check if user already registered
if wsck in prof.conferenceKeysToAttend:
# unregister user, add back one seat
prof.conferenceKeysToAttend.remove(wsck)
conf.seatsAvailable += 1
retval = True
else:
retval = False
# write things back to the datastore & return
prof.put()
conf.put()
return BooleanMessage(data=retval)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='POST', name='registerForConference')
def registerForConference(self, request):
"""Register user for selected conference."""
return self._conferenceRegistration(request)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='DELETE', name='unregisterFromConference')
def unregisterFromConference(self, request):
"""Unregister user for selected conference."""
return self._conferenceRegistration(request, reg=False)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/attending',
http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
"""Get list of conferences that user has registered for."""
prof = self._getProfileFromUser()
conf_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.conferenceKeysToAttend]
conferences = ndb.get_multi(conf_keys)
# retrieve organizers
organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return set of ConferenceForm objects per Conference
        return ConferenceForms(
            items=[self._copyConferenceToForm(conf, names[conf.organizerUserId])
                   for conf in conferences]
        )
# - - - Announcements - - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
"""Create Announcement & assign to memcache; used by
memcache cron job & putAnnouncement().
"""
confs = Conference.query(ndb.AND(
Conference.seatsAvailable <= 5,
Conference.seatsAvailable > 0)
).fetch(projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = '%s %s' % (
'Last chance to attend! The following conferences '
'are nearly sold out:',
', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no sold out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/announcement/get',
http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
"""Return Announcement from memcache."""
announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY)
if not announcement:
announcement = ""
return StringMessage(data=announcement)
# - - - Conference Session - - - - - - - - - - - - - - - - - - - -
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/sessions',
http_method='GET',
name='getConferenceSessions')
def getConferenceSessions(self, request):
"""Given a conference, return all sessions"""
# get Conference object from request
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException('No conference found with key: %s' % request.websafeConferenceKey)
        # return the set of SessionForms belonging to the Conference
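        # 'conf.sessions' is assumed to be a back-reference/ancestor query
        # yielding the Session entities created under this conference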
return SessionForms(items=[self._copySessionToForm(session) for session in conf.sessions])
@endpoints.method(SESSION_TYPE_GET_REQUEST,
SessionForms,
path='conference/{websafeConferenceKey}/sessions/type/{sessionType}',
http_method='GET',
name='getConferenceSessionsByType')
def getConferenceSessionsByType(self, request):
"""Given a conference, return all sessions of a specified type (eg lecture, keynote, workshop)"""
# get Conference object from request
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException('No conference found with key: %s' % request.websafeConferenceKey)
# filter sessions by session type
sessions = conf.sessions.filter(Session.typeOfSession == str(request.sessionType))
# Return a set of SessionForm objects per session
return SessionForms(items=[self._copySessionToForm(session) for session in sessions])
@endpoints.method(SESSION_SPEAKER_GET_REQUEST,
SessionForms,
path='sessions/speaker/{speaker}',
http_method='GET',
name='getSessionsBySpeaker')
def getSessionsBySpeaker(self, request):
"""Given a speaker, return all sessions given by this particular speaker, across all conferences"""
#filter session by speaker
sessions = Session.query(Session.speaker == Speaker(name=request.speaker))
# Return a set of SessionForm objects per session
return SessionForms(items=[self._copySessionToForm(session) for session in sessions])
def _createSessionObject(self, sessionForm):
"""Create Session object, return SessionForm."""
# ensure user is authenticated
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get the conference
conf = ndb.Key(urlsafe=sessionForm.websafeConferenceKey).get()
if not conf:
            raise endpoints.NotFoundException('No conference found with key: %s' % sessionForm.websafeConferenceKey)
# ensure ownership
if getUserId(user) != conf.organizerUserId:
raise endpoints.ForbiddenException('Only organizer of conference : %s can add sessions.' % conf.name)
# copy SessionForm/ProtoRPC Message into dict
data = {field.name: getattr(sessionForm, field.name) for field in sessionForm.all_fields()}
        # convert typeOfSession to string
        if data['typeOfSession']:
            data['typeOfSession'] = str(data['typeOfSession'])
        else:
            data['typeOfSession'] = str(SessionType.NOT_SPECIFIED)
del data['websafeKey']
del data['websafeConferenceKey']
# check required fields
for key in SESSION_REQUIRED_FIELDS:
if not data[key]:
raise endpoints.BadRequestException("'%s' field is required to create a session." % key)
# convert date string to a datetime object.
try:
data['date'] = datetime.strptime(data['date'][:10], "%Y-%m-%d").date()
except (TypeError, ValueError):
raise endpoints.BadRequestException("Invalid date format. Please use 'YYYY-MM-DD'")
        # convert time string to a time object. HH:MM
        try:
            data['startTime'] = datetime.strptime(data['startTime'][:5], "%H:%M").time()
        except (TypeError, ValueError):
            raise endpoints.BadRequestException("Invalid time format. Please use 'HH:MM'")
if data['duration'] <= 0:
raise endpoints.BadRequestException("Duration must be greater than zero")
        # session must fall within the conference start and end dates, but
        # only when those dates were defined at conference creation
        if conf.startDate and conf.endDate:
if data['date'] < conf.startDate or data['date'] > conf.endDate:
raise endpoints.BadRequestException("Session must be within range of conference start and end date")
data['speaker'] = Speaker(name=data['speaker'])
        # ask Datastore to allocate a new Session ID
s_id = Session.allocate_ids(size=1, parent=conf.key)[0]
# Datastore returns an integer ID that we can use to create a session key
data['key'] = ndb.Key(Session, s_id, parent=conf.key)
# Add session to datastore
session = Session(**data)
session.put()
# Add a task to check and update new featured speaker
taskqueue.add(
params={'websafeConferenceKey': conf.key.urlsafe(), 'speaker': session.speaker.name},
url='/tasks/set_featured_speaker'
)
return self._copySessionToForm(session)
@endpoints.method(SESSION_POST_REQUEST,
SessionForm,
path='conference/sessions/{websafeConferenceKey}',
http_method='POST',
name='createSession')
def createSession(self, request):
"""Creates a session, open to the organizer of the conference"""
return self._createSessionObject(request)
    def _copySessionToForm(self, session):
"""Copy fields from Session to SessionForm."""
sf = SessionForm()
for field in sf.all_fields():
if hasattr(session, field.name):
# convert Date to date string; just copy others
if field.name.endswith('date'):
setattr(sf, field.name, getattr(session, field.name).strftime('%Y-%m-%d'))
elif field.name.endswith('startTime'):
setattr(sf, field.name, getattr(session, field.name).strftime('%H:%M'))
elif field.name.endswith('speaker'):
setattr(sf, field.name, session.speaker.name)
elif field.name.endswith('typeOfSession'):
setattr(sf, field.name, getattr(SessionType, getattr(session, field.name)))
else:
setattr(sf, field.name, getattr(session, field.name))
elif field.name == "websafeKey":
setattr(sf, field.name, session.key.urlsafe())
sf.check_initialized()
return sf
@endpoints.method(SESSION_WISHLIST_POST_REQUEST,
BooleanMessage,
path='profile/wishlist/{websafeSessionKey}',
http_method='POST',
name='addSessionToWishlist')
@ndb.transactional(xg=True)
def addSessionToWishlist(self, request):
"""adds the session to the user's list of sessions they are interested in attending"""
# get user Profile
prof = self._getProfileFromUser()
# get session and check if it exists
key = ndb.Key(urlsafe=request.websafeSessionKey)
session = key.get()
if not session:
raise endpoints.BadRequestException("Session with key %s doesn't exist" % request.sessionKey)
# ensure is not already in user's wishlist
if key in prof.wishList:
raise ConflictException("This session is already in user's wishlist")
# add session to user's list
prof.wishList.append(key)
prof.put()
return BooleanMessage(data=True)
@endpoints.method(message_types.VoidMessage,
SessionForms,
path='profile/wishlist/all',
http_method='GET',
name='getSessionsInWishlist')
def getSessionsInWishlist(self, request):
"""query for all the sessions in a conference that the user is interested in"""
# get user Profile
prof = self._getProfileFromUser()
# get all sessions in user's wishlist
sessions = ndb.get_multi(prof.wishList)
# return a set of `SessionForm` objects
return SessionForms(items=[self._copySessionToForm(session) for session in sessions])
@endpoints.method(SESSION_WISHLIST_POST_REQUEST,
BooleanMessage,
path='profile/wishlist/{websafeSessionKey}',
http_method='DELETE',
name='deleteSessionInWishlist')
@ndb.transactional()
def deleteSessionInWishlist(self, request):
"""removes the session from the user’s list of sessions they are interested in attending"""
# get user Profile
prof = self._getProfileFromUser()
key = ndb.Key(urlsafe=request.websafeSessionKey)
        # check that the session key is in the user's wishlist
if key not in prof.wishList:
raise endpoints.BadRequestException("Failed to find session in user's wishlist")
# remove session from user's wishlist
prof.wishList.remove(key)
prof.put()
return BooleanMessage(data=True)
    # additional query endpoint
@endpoints.method(message_types.VoidMessage,
SessionForms,
path='conference/sessions/hour',
http_method='GET',
name='gethourSessions')
    def gethourSessions(self, request):
        """Return all sessions that are an hour or less long"""
        # duration is specified in minutes
        sessions = Session.query(Session.duration <= 60)
        return SessionForms(items=[self._copySessionToForm(session) for session in sessions])
def _getSessionQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Session.query()
        inequality_filter, filters = self._formatFilters(request.filters, filter_type='Session')
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Session.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Session.name)
for filtr in filters:
if filtr["field"] in ["duration"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
q = q.filter(formatted_query)
return q
@endpoints.method(SessionQueryForms,
SessionForms,
path='querySessions',
http_method='POST',
name='querySessions')
def querySessions(self, request):
"""Query for sessions."""
# use `SESSION_FIELDS` to construct query.
sessions = self._getSessionQuery(request)
return SessionForms(items=[self._copySessionToForm(session) for session in sessions])
    # special query problem
@endpoints.method(SESSION_POST_REQUEST,
SessionForms,
path='conference/{websafeConferenceKey}/sessions/typewithtime',
http_method='GET',
name='getTypewithTime')
    def getTypewithTime(self, request):
        """Special query that handles a pair of inequality restrictions"""
        wck = request.websafeConferenceKey
        # get conference object
        conf_key = ndb.Key(urlsafe=wck)
        if not conf_key.get():
            raise endpoints.NotFoundException('No conference found with key: %s' % wck)
        query = Session.query(ancestor=conf_key)
        query = query.filter(Session.typeOfSession != str(SessionType.workshop))
        query = query.order(Session.typeOfSession)
        query = query.order(Session.date)
        query = query.order(Session.startTime)
        results = [session for session in query if session.startTime < time(19)]
        return SessionForms(items=[self._copySessionToForm(session) for session in results])
@endpoints.method(message_types.VoidMessage,
StringMessage,
path='conference/featured_speakers/get',
http_method='GET',
name='getFeaturedSpeaker')
def getFeaturedSpeaker(self, request):
"""Returns featured speaker along with their sessions from memcache"""
return StringMessage(data=memcache.get(MEMCACHE_FEATURED_SPEAKER_KEY) or "")
# registers API
api = endpoints.api_server([ConferenceApi])
| apache-2.0 | 3,213,191,701,473,363,000 | 38.264574 | 116 | 0.613094 | false | 4.297423 | false | false | false |
foursquare/fsqio | src/jvm/io/fsq/twofishes/scripts/match-flickr.py | 1 | 2018 | #!/usr/bin/python
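# Usage: match-flickr.py <geojson file> [<geojson file> ...]
# Matches Flickr WOE place bounding boxes against a local twofishes geocoder
# (assumed to listen on localhost:8081) and writes flickr-bbox.tsv.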
import sys
import csv
import urllib
import urllib2
import json
import geojson
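# output maps geonameid -> (exact woetype match?, tab-separated bbox line)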
output = {}
files = sys.argv[1:]
for f in files:
fdata = open(f).read()
try:
data = geojson.loads(fdata)
  except Exception:
print 'failed to parse: ' + fdata
continue
for feature in data['features']:
woeid = str(feature['properties']['woe_id'])
label = feature['properties']['label']
woetype = int(feature['properties']['place_type_id'])
bbox = feature['geometry']['bbox']
url = u"http://localhost:8081/?query=%s&woeHint=%s" % (urllib.quote(label.encode('utf-8')), woetype)
try:
response = urllib2.urlopen(url)
data = response.read()
    except Exception:
print url
print "Unexpected error:", sys.exc_info()[0]
continue
jsonData = json.loads(data)
geocodes = False
match = False
for interp in jsonData['interpretations']:
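      # stop at the first interpretation whose query had leftover 'what' tokens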
if interp['what']:
break
fwoetype = interp['feature']['woeType']
geocodes = True
center = interp['feature']['geometry']['center']
if (
center['lat'] >= bbox[1] and
center['lat'] <= bbox[3] and
center['lng'] >= bbox[0] and
center['lng'] <= bbox[2]
):
match = True
geonameids = filter(lambda i: i['source'] == 'geonameid', interp['feature']['ids'])
if len(geonameids):
        geonameid = geonameids[0]['id']
        if (geonameid not in output) or (not output[geonameid][0]):
          lowlng = bbox[0]
          lowlat = bbox[1]
          hilng = bbox[2]
          hilat = bbox[3]
          output[geonameid] = (fwoetype == woetype, '%s\t%s\t%s\t%s\t%s' % (geonameid, lowlng, lowlat, hilng, hilat))
if not geocodes:
print (u'No geocodes for %s %s' % (woeid, label)).encode('utf-8')
elif not match:
print (u'Geocodes, but no match for %s: %s' % (woeid, label)).encode('utf-8')
print bbox
print '\t' + url
outfile = open('flickr-bbox.tsv', 'w')
for k in output:
  outfile.write('%s\n' % output[k][1])
outfile.close()
| apache-2.0 | -910,351,895,282,670,600 | 25.552632 | 106 | 0.564916 | false | 3.275974 | false | false | false |
botswana-harvard/edc-lab | old/lab_clinic_api/migrations/0001_initial.py | 1 | 31522 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-07 13:52
from __future__ import unicode_literals
import datetime
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import django_revision.revision_field
import edc_base.model.fields.custom_fields
import edc_base.model.fields.hostname_modification_field
import edc_base.model.fields.userfield
import edc_base.model.fields.uuid_auto_field
import edc_base.model.validators.date
class Migration(migrations.Migration):
initial = True
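    # Note: the hard-coded datetime defaults below were captured at the time
    # makemigrations ran; they apply only within this migration.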
dependencies = [
('edc_registration', '0002_auto_20160503_1604'),
]
operations = [
migrations.CreateModel(
name='Aliquot',
fields=[
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. UUID primary key.', primary_key=True, serialize=False)),
('aliquot_identifier', models.CharField(editable=False, help_text='Aliquot identifier', max_length=25, unique=True, verbose_name='Aliquot Identifier')),
('aliquot_datetime', models.DateTimeField(default=datetime.datetime(2016, 5, 7, 13, 51, 55, 444847), verbose_name='Date and time aliquot created')),
('count', models.IntegerField(editable=False, null=True)),
('medium', models.CharField(choices=[('tube_any', 'Tube'), ('tube_edta', 'Tube EDTA'), ('swab', 'Swab'), ('dbs_card', 'DBS Card')], default='TUBE', max_length=25, verbose_name='Medium')),
('original_measure', models.DecimalField(decimal_places=2, default='5.00', max_digits=10)),
('current_measure', models.DecimalField(decimal_places=2, default='5.00', max_digits=10)),
('measure_units', models.CharField(choices=[('mL', 'mL'), ('uL', 'uL'), ('spots', 'spots'), ('n/a', 'Not Applicable')], default='mL', max_length=25)),
('status', models.CharField(choices=[('available', 'available'), ('consumed', 'consumed')], default='available', max_length=25)),
('comment', models.CharField(blank=True, max_length=50, null=True)),
('subject_identifier', models.CharField(editable=False, help_text='non-user helper field to simplify search and filtering', max_length=50, null=True)),
('is_packed', models.BooleanField(default=False, verbose_name='packed')),
('receive_identifier', models.CharField(editable=False, help_text='non-user helper field to simplify search and filter', max_length=25, null=True)),
('import_datetime', models.DateTimeField(editable=False, null=True)),
],
options={
'ordering': ('receive', 'count'),
},
),
migrations.CreateModel(
name='AliquotCondition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('name', models.CharField(db_index=True, help_text='(suggest 40 characters max.)', max_length=250, null=True, unique=True, verbose_name='Name')),
('short_name', models.CharField(db_index=True, help_text='This is the stored value, required', max_length=250, null=True, unique=True, verbose_name='Stored value')),
('display_index', models.IntegerField(db_index=True, default=0, help_text='Index to control display order if not alphabetical, not required', verbose_name='display index')),
('field_name', models.CharField(blank=True, editable=False, help_text='Not required', max_length=25, null=True)),
('version', models.CharField(default='1.0', editable=False, max_length=35)),
],
),
migrations.CreateModel(
name='AliquotType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('name', models.CharField(max_length=50, verbose_name='Description')),
('alpha_code', models.CharField(max_length=15, unique=True, validators=[django.core.validators.RegexValidator('^[A-Z]{2,15}$')], verbose_name='Alpha code')),
('numeric_code', models.CharField(max_length=2, unique=True, validators=[django.core.validators.RegexValidator('^[0-9]{2}$')], verbose_name='Numeric code (2-digit)')),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Order',
fields=[
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. UUID primary key.', primary_key=True, serialize=False)),
('order_identifier', models.CharField(db_index=True, editable=False, help_text='Allocated internally', max_length=25, unique=True, verbose_name='Order number')),
('order_datetime', models.DateTimeField(db_index=True, validators=[edc_base.model.validators.date.datetime_not_future], verbose_name='Order Date')),
('status', models.CharField(choices=[('PENDING', 'Pending'), ('PARTIAL', 'Partial'), ('COMPLETE', 'Complete'), ('ERROR', 'Error'), ('REDRAW', 'Redraw'), ('WITHDRAWN', 'Withdrawn'), ('DUPLICATE', 'Duplicate')], max_length=25, null=True, verbose_name='Status')),
('comment', models.CharField(blank=True, max_length=150, null=True, verbose_name='Comment')),
('import_datetime', models.DateTimeField(null=True)),
('subject_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filtering', max_length=50, null=True)),
('receive_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filter', max_length=25, null=True)),
('aliquot', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Aliquot')),
],
options={
'ordering': ['order_identifier'],
},
),
migrations.CreateModel(
name='Panel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('name', models.CharField(db_index=True, max_length=50, unique=True, verbose_name='Panel Name')),
('comment', models.CharField(blank=True, max_length=250, verbose_name='Comment')),
('edc_name', models.CharField(max_length=50, null=True)),
('panel_type', models.CharField(choices=[('TEST', 'Test panel'), ('STORAGE', 'Storage panel')], default='TEST', max_length=15)),
('aliquot_type', models.ManyToManyField(help_text='Choose all that apply', to='lab_clinic_api.AliquotType')),
],
),
migrations.CreateModel(
name='Receive',
fields=[
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. UUID primary key.', primary_key=True, serialize=False)),
('receive_identifier', models.CharField(db_index=True, editable=False, max_length=25, null=True, unique=True, verbose_name='Receiving Identifier')),
('requisition_identifier', models.CharField(blank=True, db_index=True, max_length=25, null=True, verbose_name='Requisition Identifier')),
('drawn_datetime', models.DateTimeField(db_index=True, validators=[edc_base.model.validators.date.datetime_not_future], verbose_name='Date and time drawn')),
('receive_datetime', models.DateTimeField(db_index=True, default=datetime.datetime(2016, 5, 7, 13, 51, 55, 407698), validators=[edc_base.model.validators.date.datetime_not_future], verbose_name='Date and time received')),
('visit', models.CharField(max_length=25, verbose_name='Visit Code')),
('clinician_initials', edc_base.model.fields.custom_fields.InitialsField(help_text='Type 2-3 letters, all in uppercase and no spaces', max_length=3, verbose_name='Initials')),
('receive_condition', models.CharField(max_length=50, null=True, verbose_name='Condition of primary tube')),
('import_datetime', models.DateTimeField(null=True)),
('registered_subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='edc_registration.RegisteredSubject')),
],
),
migrations.CreateModel(
name='Result',
fields=[
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. UUID primary key.', primary_key=True, serialize=False)),
('result_identifier', models.CharField(db_index=True, editable=False, max_length=25)),
('result_datetime', models.DateTimeField(db_index=True, help_text='Date result added to system.')),
('release_status', models.CharField(choices=[('NEW', 'New'), ('RELEASED', 'Released'), ('AMENDED', 'Amended')], db_index=True, default='NEW', max_length=25)),
('release_datetime', models.DateTimeField(blank=True, db_index=True, help_text='Date result authorized for release. This field will auto-fill if release status is changed', null=True)),
('release_username', models.CharField(blank=True, db_index=True, help_text='Username of person authorizing result for release. This field will auto-fill if release status is changed', max_length=50, null=True, verbose_name='Release username')),
('comment', models.CharField(blank=True, max_length=50, null=True, verbose_name='Comment')),
('dmis_result_guid', models.CharField(blank=True, editable=False, help_text='dmis import value. N/A unless data imported from old system', max_length=36, null=True)),
('import_datetime', models.DateTimeField(null=True)),
('reviewed', models.BooleanField(default=False)),
('subject_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filtering', max_length=50, null=True)),
('receive_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filter', max_length=25, null=True)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Order')),
],
options={
'ordering': ['result_identifier'],
},
),
migrations.CreateModel(
name='ResultItem',
fields=[
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. UUID primary key.', primary_key=True, serialize=False)),
('result_item_value', models.CharField(db_index=True, max_length=25, verbose_name='Result')),
('result_item_value_as_float', models.FloatField(db_index=True, editable=False, null=True, verbose_name='Numeric result')),
('result_item_quantifier', models.CharField(choices=[('=', '='), ('>', '>'), ('>=', '>='), ('<', '<'), ('<=', '<=')], default='=', max_length=25, verbose_name='Quantifier')),
('result_item_datetime', models.DateTimeField(db_index=True, verbose_name='Assay date and time')),
('result_item_operator', models.CharField(blank=True, db_index=True, max_length=50, null=True, verbose_name='Operator')),
('grade_range', models.CharField(blank=True, max_length=25, null=True)),
('grade_flag', models.CharField(blank=True, max_length=5, null=True)),
('grade_message', models.CharField(blank=True, max_length=50, null=True)),
('grade_warn', models.BooleanField(default=False)),
('reference_flag', models.CharField(blank=True, max_length=5, null=True)),
('reference_range', models.CharField(blank=True, max_length=25, null=True)),
('validation_status', models.CharField(choices=[('P', 'Preliminary'), ('F', 'Final'), ('R', 'Rejected')], db_index=True, default='P', help_text='Default is preliminary', max_length=10, verbose_name='Status')),
('validation_datetime', models.DateTimeField(blank=True, db_index=True, null=True)),
('validation_username', models.CharField(blank=True, db_index=True, max_length=50, null=True, verbose_name='Validation username')),
('validation_reference', models.CharField(blank=True, max_length=50, null=True, verbose_name='Validation reference')),
('comment', models.CharField(blank=True, max_length=50, null=True, verbose_name='Validation Comment')),
('error_code', models.CharField(blank=True, max_length=50, null=True, verbose_name='Error codes')),
('subject_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filtering', max_length=50, null=True)),
('receive_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filter', max_length=25, null=True)),
('import_datetime', models.DateTimeField(null=True)),
('subject_type', models.CharField(max_length=25, null=True)),
('result', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Result')),
],
options={
'ordering': ('-result_item_datetime',),
},
),
migrations.CreateModel(
name='Review',
fields=[
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. UUID primary key.', primary_key=True, serialize=False)),
('title', models.CharField(editable=False, max_length=50)),
('review_datetime', models.DateTimeField(null=True)),
('review_status', models.CharField(choices=[('REQUIRES_REVIEW', 'Requires Review'), ('REVIEWED', 'Reviewed')], max_length=25)),
('comment', models.TextField(blank=True, max_length=500, null=True)),
],
options={
'ordering': ['review_datetime'],
},
),
migrations.CreateModel(
name='TestCode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('code', models.CharField(max_length=15, unique=True, validators=[django.core.validators.RegexValidator('^[A-Z0-9\\%\\_\\-]{1,15}$', 'Ensure test code is uppercase alphanumeric ( with _ ,%) and no spaces')], verbose_name='Test Code')),
('name', models.CharField(max_length=50, verbose_name='Test Code Description')),
('units', models.CharField(choices=[('%', '%'), ('10^0/L', '10^0/L'), ('10^3/uL', '10^3/uL'), ('10^6/uL', '10^6/uL'), ('cells/ul', 'cells/ul'), ('copies/ml', 'copies/ml'), ('fL', 'fL'), ('g/dL', 'g/dL'), ('g/L', 'g/L'), ('mg/dL', 'mg/dL'), ('mg/L', 'mg/L'), ('mm^3', 'mm^3'), ('mm/H', 'mm/H'), ('mmol/L', 'mmol/L'), ('ng/ml', 'ng/ml'), ('pg', 'pg'), ('ratio', 'ratio'), ('U/L', 'U/L'), ('umol/L', 'umol/L')], max_length=25, verbose_name='Units')),
('display_decimal_places', models.IntegerField(blank=True, null=True, verbose_name='Decimal places to display')),
('is_absolute', models.CharField(choices=[('absolute', 'Absolute'), ('calculated', 'Calculated')], default='absolute', max_length=15, verbose_name='Is the value absolute or calculated?')),
('formula', models.CharField(blank=True, max_length=50, null=True, verbose_name='If calculated, formula?')),
('edc_code', models.CharField(db_index=True, max_length=25, null=True)),
('edc_name', models.CharField(db_index=True, max_length=50, null=True)),
],
options={
'ordering': ['edc_name'],
},
),
migrations.CreateModel(
name='TestCodeGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('code', models.CharField(max_length=15, null=True)),
('name', models.CharField(blank=True, max_length=25, null=True)),
],
options={
'ordering': ['code'],
},
),
migrations.AddField(
model_name='testcode',
name='test_code_group',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.TestCodeGroup'),
),
migrations.AddField(
model_name='resultitem',
name='test_code',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='lab_clinic_api.TestCode'),
),
migrations.AddField(
model_name='result',
name='review',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Review'),
),
migrations.AddField(
model_name='panel',
name='test_code',
field=models.ManyToManyField(blank=True, null=True, to='lab_clinic_api.TestCode'),
),
migrations.AddField(
model_name='order',
name='panel',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Panel'),
),
migrations.AddField(
model_name='aliquot',
name='aliquot_condition',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.AliquotCondition', verbose_name='Aliquot Condition'),
),
migrations.AddField(
model_name='aliquot',
name='aliquot_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.AliquotType', verbose_name='Aliquot Type'),
),
migrations.AddField(
model_name='aliquot',
name='primary_aliquot',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='primary', to='lab_clinic_api.Aliquot'),
),
migrations.AddField(
model_name='aliquot',
name='receive',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Receive'),
),
migrations.AddField(
model_name='aliquot',
name='source_aliquot',
field=models.ForeignKey(editable=False, help_text='Aliquot from which this aliquot was created, Leave blank if this is the primary tube', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='source', to='lab_clinic_api.Aliquot'),
),
migrations.AlterUniqueTogether(
name='aliquot',
unique_together=set([('receive', 'count')]),
),
]
| gpl-2.0 | 2,280,493,389,148,608,000 | 91.985251 | 463 | 0.645327 | false | 3.959056 | true | false | false |
easy-as-pie-labs/tweap | tweap/project_management/tests.py | 1 | 11427 | from django.test import TestCase
from project_management.models import Project, Invitation, Tag
from project_management.tools import invite_users, get_tags
from django.contrib.auth.models import User
import json
from django.http.response import HttpResponse, HttpResponseRedirect, HttpResponseNotAllowed
class ModelTest(TestCase):
project_name = "Testproject"
project_description = "Testdescription"
def test_project_model_members_and_leave(self):
user = User.objects.create_user('testuser', '[email protected]', 'testpw')
user2 = User.objects.create_user('testuser2', '[email protected]', 'testpw')
project = Project(name=self.project_name, description=self.project_description)
project.save()
self.assertEqual(str(project), self.project_name)
project.members.add(user)
project.members.add(user2)
# test if users are in project now
self.assertTrue(user in project.members.all())
self.assertTrue(user2 in project.members.all())
project.leave(user2)
project_exists = Project.objects.filter(id=project.id).exists()
# test if user2 is removed from project and project still exists
self.assertTrue(project_exists)
self.assertTrue(user in project.members.all())
self.assertFalse(user2 in project.members.all())
project.leave(user)
project_exists = Project.objects.filter(id=project.id).exists()
# test if leave of last user deletes the project
self.assertFalse(project_exists)
# cleanup
user.delete()
user2.delete()
def test_invitation_model_get_for_users(self):
project = Project(name=self.project_name, description=self.project_description)
project.save()
user = User.objects.create_user('testuser', '[email protected]', 'testpw')
invitation = Invitation(user=user, project=project)
invitation.save()
self.assertEqual(str(invitation), user.username + ' invited to ' + self.project_name)
# test if invitation is returned for the user via the method get_for_user()
self.assertTrue(invitation in Invitation.get_for_user(user))
invitation.delete()
# cleanup
user.delete()
def test_invitation_model_accept(self):
project = Project(name=self.project_name, description=self.project_description)
project.save()
user = User.objects.create_user('testuser', '[email protected]', 'testpw')
invitation = Invitation(user=user, project=project)
invitation.save()
invitation_exists = Invitation.objects.filter(id=invitation.id).exists()
# test if invitation exists
self.assertTrue(invitation_exists)
invitation.accept()
invitation_exists = Invitation.objects.filter(id=invitation.id).exists()
# test if user is now member of the project and invitation was deleted
self.assertTrue(user in project.members.all())
self.assertFalse(invitation_exists)
# cleanup
user.delete()
def test_invitation_model_reject(self):
project = Project(name=self.project_name, description=self.project_description)
project.save()
user = User.objects.create_user('testuser', '[email protected]', 'testpw')
invitation = Invitation(user=user, project=project)
invitation.save()
invitation_exists = Invitation.objects.filter(id=invitation.id).exists()
# test if invitation exists
self.assertTrue(invitation_exists)
invitation.reject()
invitation_exists = Invitation.objects.filter(id=invitation.id).exists()
# test if user is not member of the project and invitation was deleted
self.assertFalse(user in project.members.all())
self.assertFalse(invitation_exists)
# cleanup
user.delete()
def test_has_user(self):
user = User.objects.create_user('testuser', '[email protected]', 'testpw')
user2 = User.objects.create_user('testuser2', '[email protected]', 'testpw')
user3 = User.objects.create_user('testuser3', '[email protected]', 'testpw')
project = Project(name=self.project_name, description=self.project_description)
project.save()
self.assertEqual(str(project), self.project_name)
project.members.add(user)
project.members.add(user2)
# test if users are in project now
self.assertTrue(project.has_user(user))
self.assertTrue(project.has_user(user2))
self.assertFalse(project.has_user(user3))
project.leave(user2)
        project_exists = Project.objects.filter(id=project.id).exists()
        # test if user2 is removed from project and project still exists
        self.assertTrue(project_exists)
        self.assertTrue(project.has_user(user))
        self.assertFalse(project.has_user(user2))
        self.assertFalse(project.has_user(user3))
project.leave(user)
project_exists = Project.objects.filter(id=project.id).exists()
# test if leave of last user deletes the project
self.assertFalse(project_exists)
# cleanup
user.delete()
user2.delete()
user3.delete()
class ToolsTest(TestCase):
def test_invite_users(self):
project = Project(name="Testprojekt")
project.save()
user1 = User.objects.create_user('user1', '[email protected]', 'testpw')
user2 = User.objects.create_user('user2', '[email protected]', 'testpw')
user3 = User.objects.create_user('user3', '[email protected]', 'testpw')
# test with username and email
user_string = ['user1', '[email protected]', 'test']
user_string = json.dumps(user_string)
invite_users(user_string, project)
        # test if both users are invited
self.assertTrue(Invitation.objects.filter(user=user1, project=project).exists())
self.assertTrue(Invitation.objects.filter(user=user2, project=project).exists())
self.assertFalse(Invitation.objects.filter(user=user3, project=project).exists())
#cleanup
user1.delete()
user2.delete()
user3.delete()
def test_get_tags(self):
project = Project(name="Testprojekt")
project.save()
tag = Tag(name="testtag1", project=project)
tag.save()
#test if only testtag1 exists
self.assertTrue(Tag.objects.filter(project=project, name="testtag1").exists())
self.assertFalse(Tag.objects.filter(project=project, name="testtag2").exists())
self.assertFalse(Tag.objects.filter(project=project, name="testtag3").exists())
        tag_string = ['testtag1', 'testtag2', 'testtag3']
tag_string = json.dumps(tag_string)
tags = get_tags(tag_string, project)
#test if return list contains 3 Tags
        self.assertEqual(len(tags), 3)
self.assertIsInstance(tags[0], Tag)
#test that all 3 testtags exists now
self.assertTrue(Tag.objects.filter(project=project, name="testtag1").exists())
self.assertTrue(Tag.objects.filter(project=project, name="testtag2").exists())
self.assertTrue(Tag.objects.filter(project=project, name="testtag3").exists())
class ViewsTest(TestCase):
def setup_login(self):
User.objects.create_user('user', '[email protected]', 'testpw')
self.client.post('/users/login/', {'username': 'user', 'password': 'testpw'})
def test_project_create_edit(self):
self.setup_login()
# test if page is available
resp = self.client.get('/projects/new/')
self.assertEqual(resp.status_code, 200)
self.assertFalse('error_messages' in resp.context)
# test if validation works
resp = self.client.post('/projects/new/', {})
self.assertEqual(resp.status_code, 200)
self.assertTrue(resp.context['error_messages'])
# test if project with name only can be created
resp = self.client.post('/projects/new/', {'name': 'TestCreateProject', 'icon': 'fa fa-folder-open-o'})
self.assertEqual(resp.status_code, 302)
self.assertTrue(type(resp) is HttpResponseRedirect)
project_exist = Project.objects.filter(name='TestCreateProject').exists()
self.assertTrue(project_exist)
# test if project with name and description can be created
resp = self.client.post('/projects/new/', {'name': 'TestCreateProject2', 'description': 'I am a test project', 'icon': 'fa fa-folder-open-o'})
self.assertEqual(resp.status_code, 302)
self.assertTrue(type(resp) is HttpResponseRedirect)
project_exist = Project.objects.filter(name='TestCreateProject2').exists()
self.assertTrue(project_exist)
project = Project.objects.get(name='TestCreateProject2')
self.assertEqual(project.description, 'I am a test project')
        # test if a non-existing project returns 404
resp = self.client.get('/projects/edit/9999/')
self.assertEqual(resp.status_code, 404)
# test if an existing project can be edited
resp = self.client.get('/projects/edit/' + str(project.id) + '/')
self.assertEqual(resp.status_code, 200)
# test if changes are saved
resp = self.client.post('/projects/edit/' + str(project.id) + '/', {'name': 'new name', 'description': 'new description', 'icon': 'fa fa-folder-open-o'})
self.assertEqual(resp.status_code, 302)
project = Project.objects.get(id=project.id)
self.assertEqual(project.name, 'new name')
self.assertEqual(project.description, 'new description')
def test_project_view(self):
self.setup_login()
# test if project with name only can be created
resp = self.client.post('/projects/new/', {'name': 'TestCreateProject', 'icon': 'fa fa-folder-open-o'})
self.assertEqual(resp.status_code, 302)
self.assertTrue(type(resp) is HttpResponseRedirect)
project_exists = Project.objects.filter(name='TestCreateProject').exists()
self.assertTrue(project_exists)
project = Project.objects.get(name='TestCreateProject')
        print('test: access own project')
resp = self.client.get('/projects/' + str(project.id))
self.assertEqual(resp.status_code, 200)
self.assertTrue(type(resp) is HttpResponse)
resp = self.client.post('/projects/' + str(project.id))
self.assertTrue(type(resp) is HttpResponseNotAllowed)
print('test non-existent project')
resp = self.client.get('/projects/1337')
self.assertEqual(resp.status_code, 404)
self.client.get('/users/logout/')
print('test: access \'own\' project when not logged in')
resp = self.client.get('/projects/' + str(project.id))
self.assertEqual(resp.status_code, 302)
self.assertTrue(type(resp) is HttpResponseRedirect)
User.objects.create_user('anotheruser', '[email protected]', 'testpw')
self.client.post('/users/login/', {'username': 'anotheruser', 'password': 'testpw'})
print('test: someone else\'s project')
resp = self.client.get('/projects/' + str(project.id))
self.assertEqual(resp.status_code, 404)
def test_view_all(self):
# TODO: renew tests
pass
def test_view_invites(self):
# TODO: renew tests
pass
def test_leave(self):
pass
def test_invitation_handler(self):
pass
| gpl-3.0 | 4,192,250,747,644,782,600 | 40.552727 | 161 | 0.655203 | false | 3.896011 | true | false | false |
Wyn10/Cnchi | cnchi/ui/gtk/pages/features.py | 1 | 13668 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# features.py
#
# Copyright © 2013-2016 Antergos
#
# This file is part of Cnchi.
#
# Cnchi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Cnchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with Cnchi; If not, see <http://www.gnu.org/licenses/>.
""" Features screen """
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
import subprocess
import logging
import desktop_info
import features_info
import misc.extra as misc
from ui.base_widgets import Page
COL_IMAGE = 0
COL_TITLE = 1
COL_DESCRIPTION = 2
COL_SWITCH = 3
class Features(Page):
""" Features screen class """
def __init__(self, params, prev_page="desktop", next_page="disk_grp", **kwargs):
""" Initializes features ui """
        super().__init__(params, name="features", prev_page=prev_page,
                         next_page=next_page, **kwargs)
self.listbox_rows = {}
self.title = _("Features")
self.in_group = True
# Set up list box
self.listbox = self.ui.get_object("listbox")
self.listbox.set_selection_mode(Gtk.SelectionMode.NONE)
self.listbox.set_sort_func(self.listbox_sort_by_name, None)
# self.listbox.set_selection_mode(Gtk.SelectionMode.BROWSE)
# self.listbox.connect("row-selected", self.on_listbox_row_selected)
# This is initialized each time this screen is shown in prepare()
self.features = None
# Only show ufw rules and aur disclaimer info once
self.info_already_shown = {"ufw": False, "aur": False}
# Only load defaults the first time this screen is shown
self.load_defaults = True
@staticmethod
def nvidia_detected():
from hardware.nvidia import Nvidia
if Nvidia().detect():
return True
from hardware.nvidia_340xx import Nvidia340xx
if Nvidia340xx().detect():
return True
from hardware.nvidia_304xx import Nvidia304xx
if Nvidia304xx().detect():
return True
return False
@staticmethod
def amd_detected():
from hardware.catalyst import Catalyst
return Catalyst().detect()
@staticmethod
def on_listbox_row_selected(listbox, listbox_row):
""" Someone selected a different row of the listbox
WARNING: IF LIST LAYOUT IS CHANGED THEN THIS SHOULD BE CHANGED ACCORDINGLY. """
if listbox_row is not None:
for vbox in listbox_row:
switch = vbox.get_children()[2]
if switch:
switch.set_active(not switch.get_active())
def fill_listbox(self):
for listbox_row in self.listbox.get_children():
listbox_row.destroy()
self.listbox_rows = {}
# Only add graphic-driver feature if an AMD or Nvidia is detected
# FIXME: Conflict between lib32-nvidia-libgl and lib32-mesa-libgl
if "graphic_drivers" in self.features:
if not self.amd_detected() and not self.nvidia_detected():
logging.debug("Neither NVidia nor AMD have been detected.")
self.features.remove("graphic_drivers")
#if "graphic_drivers" in self.features:
# self.features.remove("graphic_drivers")
for feature in self.features:
box = Gtk.Box(spacing=20)
box.set_name(feature + "-row")
self.listbox_rows[feature] = []
if feature in features_info.ICON_NAMES:
icon_name = features_info.ICON_NAMES[feature]
else:
logging.debug("No icon found for feature %s", feature)
icon_name = "missing"
object_name = "image_" + feature
image = Gtk.Image.new_from_icon_name(
icon_name,
Gtk.IconSize.DND)
image.set_name(object_name)
image.set_property('margin_start', 10)
self.listbox_rows[feature].append(image)
box.pack_start(image, False, False, 0)
text_box = Gtk.VBox()
object_name = "label_title_" + feature
label_title = Gtk.Label.new()
label_title.set_halign(Gtk.Align.START)
label_title.set_justify(Gtk.Justification.LEFT)
label_title.set_name(object_name)
self.listbox_rows[feature].append(label_title)
text_box.pack_start(label_title, False, False, 0)
object_name = "label_" + feature
label = Gtk.Label.new()
label.set_name(object_name)
self.listbox_rows[feature].append(label)
text_box.pack_start(label, False, False, 0)
box.pack_start(text_box, False, False, 0)
object_name = "switch_" + feature
switch = Gtk.Switch.new()
switch.set_name(object_name)
switch.set_property('margin_top', 10)
switch.set_property('margin_bottom', 10)
switch.set_property('margin_end', 10)
switch.get_style_context().add_class('switch')
switch.set_property('width_request', 200)
self.listbox_rows[feature].append(switch)
box.pack_end(switch, False, False, 0)
# Add row to our gtklist
self.listbox.add(box)
self.listbox.get_style_context().add_class('list_box')
self.listbox.show_all()
@staticmethod
def listbox_sort_by_name(row1, row2, user_data):
""" Sort function for listbox
Returns : < 0 if row1 should be before row2, 0 if they are equal and > 0 otherwise
WARNING: IF LAYOUT IS CHANGED IN fill_listbox THEN THIS SHOULD BE
CHANGED ACCORDINGLY. """
box1 = row1.get_child()
txt_box1 = box1.get_children()[1]
label1 = txt_box1.get_children()[0]
box2 = row2.get_child()
txt_box2 = box2.get_children()[1]
label2 = txt_box2.get_children()[0]
text = [label1.get_text(), label2.get_text()]
# sorted_text = misc.sort_list(text, self.settings.get("locale"))
sorted_text = misc.sort_list(text)
# If strings are already well sorted return < 0
if text[0] == sorted_text[0]:
return -1
        # Strings must be swapped, return > 0
return 1
def set_row_text(self, feature, title, desc, tooltip):
""" Set translated text to our listbox feature row """
if feature in self.listbox_rows:
title = "<span weight='bold' size='large'>{0}</span>".format(title)
desc = "<span size='small'>{0}</span>".format(desc)
row = self.listbox_rows[feature]
row[COL_TITLE].set_markup(title)
row[COL_DESCRIPTION].set_markup(desc)
for widget in row:
widget.set_tooltip_markup(tooltip)
def translate_ui(self):
""" Translates all ui elements """
self.header.set_subtitle(self.title)
for feature in self.features:
if feature == "graphic_drivers":
# Only add this feature if NVIDIA or AMD are detected
if not self.amd_detected() and not self.nvidia_detected():
continue
title = _(features_info.TITLES[feature])
desc = _(features_info.DESCRIPTIONS[feature])
tooltip = _(features_info.TOOLTIPS[feature])
self.set_row_text(feature, title, desc, tooltip)
# Sort listbox items
self.listbox.invalidate_sort()
def switch_defaults_on(self):
""" Enable some features by default """
if 'bluetooth' in self.features:
try:
process1 = subprocess.Popen(["lsusb"], stdout=subprocess.PIPE)
process2 = subprocess.Popen(
["grep", "-i", "bluetooth"],
stdin=process1.stdout,
stdout=subprocess.PIPE)
process1.stdout.close()
out, process_error = process2.communicate()
                if out.decode() != '':
row = self.listbox_rows['bluetooth']
row[COL_SWITCH].set_active(True)
except subprocess.CalledProcessError as err:
logging.warning(
"Error checking bluetooth presence. Command %s failed: %s",
err.cmd,
err.output)
if 'cups' in self.features:
row = self.listbox_rows['cups']
row[COL_SWITCH].set_active(True)
if 'visual' in self.features:
row = self.listbox_rows['visual']
row[COL_SWITCH].set_active(True)
def store_values(self):
""" Get switches values and store them """
for feature in self.features:
row = self.listbox_rows[feature]
is_active = row[COL_SWITCH].get_active()
self.settings.set("feature_" + feature, is_active)
if is_active:
logging.debug("Feature '%s' has been selected", feature)
# Show ufw info message if ufw is selected (show it only once)
if self.settings.get("feature_firewall") and not self.info_already_shown["ufw"]:
self.show_info_dialog("ufw")
self.info_already_shown["ufw"] = True
# Show AUR disclaimer if AUR is selected (show it only once)
if self.settings.get("feature_aur") and not self.info_already_shown["aur"]:
self.show_info_dialog("aur")
self.info_already_shown["aur"] = True
# LAMP: Ask user if he wants Apache or Nginx
if self.settings.get("feature_lamp"):
info = Gtk.MessageDialog(
transient_for=self.get_main_window(),
modal=True,
destroy_with_parent=True,
message_type=Gtk.MessageType.INFO,
buttons=Gtk.ButtonsType.YES_NO)
info.set_markup("LAMP / LEMP")
msg = _("Do you want to install the Nginx server instead of the Apache server?")
info.format_secondary_markup(msg)
response = info.run()
info.destroy()
if response == Gtk.ResponseType.YES:
self.settings.set("feature_lemp", True)
else:
self.settings.set("feature_lemp", False)
self.listbox_rows = {}
return True
def show_info_dialog(self, feature):
""" Some features show an information dialog when this screen is accepted """
if feature == "aur":
# Aur disclaimer
txt1 = _("Arch User Repository - Disclaimer")
txt2 = _("The Arch User Repository is a collection of user-submitted PKGBUILDs\n"
"that supplement software available from the official repositories.\n\n"
"The AUR is community driven and NOT supported by Arch or Antergos.\n")
elif feature == "ufw":
# Ufw rules info
txt1 = _("Uncomplicated Firewall will be installed with these rules:")
toallow = misc.get_network()
txt2 = _("ufw default deny\nufw allow from {0}\nufw allow Transmission\n"
"ufw allow SSH").format(toallow)
else:
# No message
return
txt1 = "<big>{0}</big>".format(txt1)
txt2 = "<i>{0}</i>".format(txt2)
info = Gtk.MessageDialog(
transient_for=self.get_main_window(),
modal=True,
destroy_with_parent=True,
message_type=Gtk.MessageType.INFO,
buttons=Gtk.ButtonsType.CLOSE)
info.set_markup(txt1)
info.format_secondary_markup(txt2)
info.run()
info.destroy()
def prepare(self, direction):
""" Prepare features screen to get ready to show itself """
# Each desktop has its own features
desktop = self.settings.get('desktop')
self.features = list(
set(desktop_info.ALL_FEATURES) -
set(desktop_info.EXCLUDED_FEATURES[desktop]))
self.fill_listbox()
self.translate_ui()
self.show_all()
if self.load_defaults:
self.switch_defaults_on()
# Only load defaults once
self.load_defaults = False
else:
# Load values user has chosen when this screen is shown again
self.load_values()
def load_values(self):
""" Get previous selected switches values """
for feature in self.features:
row = self.listbox_rows[feature]
is_active = self.settings.get("feature_" + feature)
if row[COL_SWITCH] is not None and is_active is not None:
row[COL_SWITCH].set_active(is_active)
# When testing, no _() is available
try:
_("")
except NameError as err:
def _(message):
return message
if __name__ == '__main__':
from test_screen import _, run
run('Features')
| gpl-3.0 | -3,563,286,189,405,870,000 | 36.138587 | 94 | 0.585791 | false | 3.993863 | false | false | false |
SVilgelm/CloudFerry | cloudferry/lib/os/storage/plugins/copy_mechanisms.py | 1 | 5969 | # Copyright 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logging
import random
from cloudferry.lib.utils import files
from cloudferry.lib.utils import remote_runner
from cloudferry.lib.copy_engines import base
LOG = logging.getLogger(__name__)
class CopyFailed(RuntimeError):
pass
class CopyMechanism(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def copy(self, context, source_object, destination_object):
raise NotImplementedError()
class CopyObject(object):
def __init__(self, host=None, path=None):
self.host = host
self.path = path
def __repr__(self):
return "{host}:{path}".format(host=self.host, path=self.path)
class RemoteFileCopy(CopyMechanism):
"""Uses one of `rsync`, `bbcp` or `scp` to copy volume files across remote
nodes. Primarily used for NFS backend."""
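    # Illustrative call shape only (hypothetical hosts/paths; `context` is
    # assumed to carry the src_cloud/dst_cloud configuration the copier needs):
    #   RemoteFileCopy().copy(context,
    #                         CopyObject(host='src-node', path='/var/lib/cinder/vol-1'),
    #                         CopyObject(host='dst-node', path='/var/lib/cinder/vol-1'))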
def copy(self, context, source_object, destination_object):
data = {
'host_src': source_object.host,
'path_src': source_object.path,
'host_dst': destination_object.host,
'path_dst': destination_object.path
}
try:
copier = base.get_copier_checked(context.src_cloud,
context.dst_cloud,
data)
copier.transfer(data)
except (base.FileCopyError,
base.CopierCannotBeUsed,
base.CopierNotFound,
base.NotEnoughSpace) as e:
msg = ("Copying file from {src_host}@{src_file} to "
"{dst_host}@{dst_file}, error: {err}").format(
src_host=source_object.host,
src_file=source_object.path,
dst_host=destination_object.host,
dst_file=destination_object.path,
err=e.message)
raise CopyFailed(msg)
class CopyRegularFileToBlockDevice(CopyMechanism):
"""Redirects regular file to stdout and copies over ssh tunnel to calling
node into block device"""
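    # Sketch of the three-step pipeline copy() assembles (placeholders only,
    # nothing is executed here):
    #   dst$ screen -d -m bash -c 'nc -l <aux_port> | dd of=<dst_device> bs=64k'
    #   src$ screen -d -m ssh -L <aux_port>:127.0.0.1:<aux_port> <dst_user>@<dst_host>
    #   src$ dd if=<src_file> bs=64k | pv | nc 127.0.0.1 <aux_port>   # pv only if installed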
@staticmethod
def _generate_session_name():
return 'copy_{}'.format(random.getrandbits(64))
def copy(self, context, source_object, destination_object):
cfg_src = context.cfg.src
cfg_dst = context.cfg.dst
src_user = cfg_src.ssh_user
dst_user = cfg_dst.ssh_user
src_host = source_object.host
dst_host = destination_object.host
rr_src = remote_runner.RemoteRunner(src_host, src_user, sudo=True,
password=cfg_src.ssh_sudo_password)
rr_dst = remote_runner.RemoteRunner(dst_host, dst_user, sudo=True,
password=cfg_dst.ssh_sudo_password)
ssh_opts = ('-o UserKnownHostsFile=/dev/null '
'-o StrictHostKeyChecking=no')
# Choose auxiliary port for SSH tunnel
aux_port_start, aux_port_end = \
context.cfg.migrate.ssh_transfer_port.split('-')
aux_port = random.randint(int(aux_port_start), int(aux_port_end))
session_name = self._generate_session_name()
try:
progress_view = ""
if files.is_installed(rr_src, "pv"):
src_file_size = files.remote_file_size(rr_src,
source_object.path)
progress_view = "pv --size {size} --progress | ".format(
size=src_file_size)
# First step: prepare netcat listening on aux_port on dst and
# forwarding all the data to block device
rr_dst.run('screen -S {session_name} -d -m /bin/bash -c '
'\'nc -l {aux_port} | dd of={dst_device} bs=64k\'; '
'sleep 1',
session_name=session_name, aux_port=aux_port,
dst_device=destination_object.path)
# Second step: create SSH tunnel between source and destination
rr_src.run('screen -S {session_name} -d -m ssh {ssh_opts} -L'
' {aux_port}:127.0.0.1:{aux_port} '
'{dst_user}@{dst_host}; sleep 1',
session_name=session_name, ssh_opts=ssh_opts,
aux_port=aux_port, dst_user=dst_user,
dst_host=dst_host)
# Third step: push data through the tunnel
rr_src.run('/bin/bash -c \'dd if={src_file} bs=64k | '
'{progress_view} nc 127.0.0.1 {aux_port}\'',
aux_port=aux_port, progress_view=progress_view,
src_file=source_object.path)
except remote_runner.RemoteExecutionError as e:
msg = "Cannot copy {src_object} to {dst_object}: {error}"
msg = msg.format(src_object=source_object,
dst_object=destination_object,
error=e.message)
raise CopyFailed(msg)
finally:
try:
rr_src.run('screen -X -S {session_name} quit || true',
session_name=session_name)
rr_dst.run('screen -X -S {session_name} quit || true',
session_name=session_name)
except remote_runner.RemoteExecutionError:
LOG.error('Failed to close copy sessions', exc_info=True)
| apache-2.0 | -7,871,854,656,423,564,000 | 37.75974 | 79 | 0.565756 | false | 4.071623 | false | false | false |
saullocastro/pyNastran | pyNastran/bdf/dev_vectorized/cards/elements/rod/ptube.py | 1 | 8276 | from __future__ import print_function
from six.moves import zip
from numpy import array, zeros, unique, searchsorted, arange, pi
from pyNastran.bdf.dev_vectorized.cards.elements.property import Property
#from pyNastran.bdf.field_writer_8 import set_blank_if_default
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.bdf_interface.assign_type import (integer,
double, double_or_blank)
class PTUBE(Property):
type = 'PTUBE'
def __init__(self, model):
"""
Defines the PTUBE object.
Parameters
----------
model : BDF
the BDF object
"""
Property.__init__(self, model)
def allocate(self, ncards):
self.n = ncards
self.model.log.debug('%s ncards=%s' % (self.type, ncards))
float_fmt = self.model.float_fmt
#: Property ID
self.property_id = zeros(ncards, 'int32')
self.material_id = zeros(ncards, 'int32')
self.OD = zeros((ncards, 2), float_fmt)
self.t = zeros(ncards, float_fmt)
self.nsm = zeros(ncards, float_fmt)
def add_card(self, card, comment=''):
self.model.log.debug('n=%s i=%s' % (self.n, self.i))
i = self.i
self.property_id[i] = integer(card, 1, 'property_id')
self.material_id[i] = integer(card, 2, 'material_id')
OD1 = double(card, 3, 'OD1')
t = double_or_blank(card, 4, 't')
if t is None:
t = OD1 / 2.
self.t[i] = t
self.nsm[i] = double_or_blank(card, 5, 'nsm', 0.0)
OD2 = double_or_blank(card, 6, 'OD2', OD1)
self.OD[i, :] = [OD1, OD2]
assert len(card) <= 7, 'len(PTUBE card) = %i\ncard=%s' % (len(card), card)
self.i += 1
def build(self):
"""
:param cards: the list of PTUBE cards
"""
if self.n:
i = self.property_id.argsort()
self.property_id = self.property_id[i]
self.material_id = self.material_id[i]
self.OD = self.OD[i, :]
self.t = self.t[i]
self.nsm = self.nsm[i]
unique_pids = unique(self.property_id)
if len(unique_pids) != len(self.property_id):
raise RuntimeError('There are duplicate PTUBE IDs...')
self._cards = []
self._comments = []
else:
self.property_id = array([], dtype='int32')
def update(self, maps):
"""
maps = {
'property' : pid_map,
'material' : mid_map,
}
"""
if self.n:
nid_map = maps['node']
mid_map = maps['material']
for i, (pid, mid) in enumerate(zip(self.property_id, self.material_id)):
self.property_id[i] = pid_map[pid]
self.material_id[i] = mid_map[mid]
#=========================================================================
def get_mass_per_length_by_property_id(self, property_id=None):
# L * (A * rho + nsm)
i = self.get_property_index_by_property_id(property_id)
A = self.A[i]
mid = self.material_id[i]
#mat = self.model.materials.get_material(mid)
rho = self.model.materials.get_density_by_material_id(mid)
nsm = self.nsm[i]
return A * rho + nsm
def get_area_by_property_id(self, property_id=None):
i = self.get_property_index_by_property_id(property_id)
return self.get_area_by_property_index(i)
def get_area_by_property_index(self, i=None):
area = zeros(len(i), dtype='float64')
for ni, ii in enumerate(i):
A = (self._area1(ii) + self._area2(ii)) / 2.
area[ni] = A
return area
def _area1(self, i):
"""Gets the Area of Section 1 of the CTUBE."""
Dout = self.OD[i, 0]
if self.t[i] == 0:
return pi / 4. * Dout**2
        Din = Dout - 2 * self.t[i]
A1 = pi / 4. * (Dout * Dout - Din * Din)
return A1
def _area2(self, i):
"""Gets the Area of Section 2 of the CTUBE."""
Dout = self.OD[i, 1]
if self.t[i] == 0:
return pi / 4. * Dout**2
        Din = Dout - 2 * self.t[i]
A2 = pi / 4. * (Dout * Dout - Din * Din)
return A2
def get_non_structural_mass_by_property_id(self, property_id=None):
i = self.get_property_index_by_property_id(property_id)
nsm = self.nsm[i]
return nsm
#def get_E_by_property_id(self, property_id=None):
#i = self.get_property_index_by_property_id(property_id)
#material_id = self.material_id[i]
#E = self.model.materials.get_E_by_material_id(material_id)
#return E
def get_E_by_property_id(self, property_id=None):
mid = self.get_material_id_by_property_id(property_id)
E = self.model.materials.get_E_by_material_id(mid)
return E
#def get_G_by_property_id(self, property_id=None):
#i = self.get_property_index_by_property_id(property_id)
#material_id = self.material_id[i]
#G = self.model.materials.get_G_by_material_id(material_id)
#return G
def get_G_by_property_id(self, property_id=None):
mid = self.get_material_id_by_property_id(property_id)
G = self.model.materials.get_G_by_material_id(mid)
return G
def get_J_by_property_id(self, property_id=None):
i = self.get_property_index_by_property_id(property_id)
return self.get_J_by_property_index(i)
def get_J_by_property_index(self, i=None):
J = []
for ni, ii in enumerate(i):
Ji = self._Ji(ii)
J.append(Ji)
return array(J, dtype='float64')
def _Ji(self, i):
Dout = self.OD[i, 0]
        if self.t[i] == 0.0:
return pi / 8. * Dout**4
Din = Dout - 2 * self.t[i]
        return pi / 8. * (Dout**4 - Din**4)
def get_c_by_property_id(self, property_id=None):
i = self.get_property_index_by_property_id(property_id)
c = self.c[i]
return c
def get_material_id_by_property_id(self, property_id=None):
i = self.get_property_index_by_property_id(property_id)
mid = self.material_id[i]
return mid
#=========================================================================
def get_density_by_property_id(self, property_id=None):
mid = self.get_material_id_by_property_id(property_id)
density = self.model.materials.get_density_by_material_id(mid)
return density
#def get_J_by_property_id(self, property_id=None):
#mid = self.get_material_id_by_property_id(property_id)
#J = self.model.materials.get_J_by_material_id(mid)
#return J
#def get_E_by_property_id(self, property_id=None):
#mid = self.get_material_id_by_property_id(property_id)
#E = self.model.materials.get_E_by_material_id(mid)
#return E
#=========================================================================
def write_card(self, bdf_file, size=8, property_id=None):
        if self.n:
            if property_id is None:
                i = arange(self.n)
            else:
                assert len(unique(property_id)) == len(property_id), unique(property_id)
                i = searchsorted(self.property_id, property_id)
            for (pid, mid, (OD1, OD2), t, nsm) in zip(
                    self.property_id[i], self.material_id[i], self.OD[i, :], self.t[i], self.nsm[i]):
                #t = set_blank_if_default(t, OD1 / 2.)
                #nsm = set_blank_if_default(nsm, 0.0)
                #OD2 = set_blank_if_default(OD2, OD1)
                card = ['PTUBE', pid, mid, OD1, t, nsm, OD2]
                bdf_file.write(print_card_8(card))
def slice_by_index(self, i):
i = self._validate_slice(i)
obj = PTUBE(self.model)
n = len(i)
obj.n = n
obj.i = n
#obj._cards = self._cards[i]
#obj._comments = obj._comments[i]
#obj.comments = obj.comments[i]
obj.property_id = self.property_id[i]
obj.material_id = self.material_id[i]
obj.OD = self.OD[i, :]
obj.t = self.t[i]
obj.nsm = self.nsm[i]
return obj
| lgpl-3.0 | -8,218,476,148,353,684,000 | 33.773109 | 95 | 0.531658 | false | 3.168453 | false | false | false |
calebbrown/calebcc | feedgenerator/feeds.py | 1 | 8744 | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title=u"Poynter E-Media Tidbits",
... link=u"http://www.poynter.org/column.asp?id=31",
... description=u"A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language=u"en",
... )
>>> feed.add_item(
... title="Hello",
... link=u"http://www.holovaty.com/test/",
... description="Testing."
... )
>>> fp = open('test.rss', 'w')
>>> feed.write(fp, 'utf-8')
>>> fp.close()
For definitions of the different versions of RSS, see:
http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from xmlutils import SimplerXMLGenerator
from utils import rfc2822_date, rfc3339_date, get_tag_uri
from base import SyndicationFeed, Enclosure
class RssFeed(SyndicationFeed):
mime_type = 'application/rss+xml'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement(u"rss", self.rss_attributes())
handler.startElement(u"channel", self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement(u"rss")
def rss_attributes(self):
return {u"version": self._version,
u"xmlns:atom": u"http://www.w3.org/2005/Atom"}
def write_items(self, handler):
for item in self.items:
handler.startElement(u'item', self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement(u"item")
def add_root_elements(self, handler):
handler.addQuickElement(u"title", self.feed['title'])
handler.addQuickElement(u"link", self.feed['link'])
handler.addQuickElement(u"description", self.feed['description'])
handler.addQuickElement(u"atom:link", None, {u"rel": u"self", u"href": self.feed['feed_url']})
if self.feed['language'] is not None:
handler.addQuickElement(u"language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement(u"category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement(u"copyright", self.feed['feed_copyright'])
handler.addQuickElement(u"lastBuildDate", rfc2822_date(self.latest_post_date()).decode('utf-8'))
if self.feed['ttl'] is not None:
handler.addQuickElement(u"ttl", self.feed['ttl'])
def endChannelElement(self, handler):
handler.endElement(u"channel")
class RssUserland091Feed(RssFeed):
_version = u"0.91"
def add_item_elements(self, handler, item):
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", item['link'])
if item['description'] is not None:
handler.addQuickElement(u"description", item['description'])
class Rss201rev2Feed(RssFeed):
# Spec: http://blogs.law.harvard.edu/tech/rss
_version = u"2.0"
def add_item_elements(self, handler, item):
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", item['link'])
if item['description'] is not None:
handler.addQuickElement(u"description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement(u"author", "%s (%s)" % \
(item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement(u"author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement(u"dc:creator", item["author_name"], {u"xmlns:dc": u"http://purl.org/dc/elements/1.1/"})
if item['pubdate'] is not None:
handler.addQuickElement(u"pubDate", rfc2822_date(item['pubdate']).decode('utf-8'))
if item['comments'] is not None:
handler.addQuickElement(u"comments", item['comments'])
if item['unique_id'] is not None:
handler.addQuickElement(u"guid", item['unique_id'])
if item['ttl'] is not None:
handler.addQuickElement(u"ttl", item['ttl'])
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement(u"enclosure", '',
{u"url": item['enclosure'].url, u"length": item['enclosure'].length,
u"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement(u"category", cat)
class Atom1Feed(SyndicationFeed):
# Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
mime_type = 'application/atom+xml; charset=utf8'
ns = u"http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement(u'feed', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
handler.endElement(u"feed")
def root_attributes(self):
if self.feed['language'] is not None:
return {u"xmlns": self.ns, u"xml:lang": self.feed['language']}
else:
return {u"xmlns": self.ns}
def add_root_elements(self, handler):
handler.addQuickElement(u"title", self.feed['title'])
handler.addQuickElement(u"link", "", {u"rel": u"alternate", u"href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement(u"link", "", {u"rel": u"self", u"href": self.feed['feed_url']})
handler.addQuickElement(u"id", self.feed['id'])
handler.addQuickElement(u"updated", rfc3339_date(self.latest_post_date()).decode('utf-8'))
if self.feed['author_name'] is not None:
handler.startElement(u"author", {})
handler.addQuickElement(u"name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement(u"email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement(u"uri", self.feed['author_link'])
handler.endElement(u"author")
if self.feed['subtitle'] is not None:
handler.addQuickElement(u"subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement(u"category", "", {u"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement(u"rights", self.feed['feed_copyright'])
def write_items(self, handler):
for item in self.items:
handler.startElement(u"entry", self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement(u"entry")
def add_item_elements(self, handler, item):
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", u"", {u"href": item['link'], u"rel": u"alternate"})
if item['pubdate'] is not None:
handler.addQuickElement(u"updated", rfc3339_date(item['pubdate']).decode('utf-8'))
# Author information.
if item['author_name'] is not None:
handler.startElement(u"author", {})
handler.addQuickElement(u"name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement(u"email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement(u"uri", item['author_link'])
handler.endElement(u"author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement(u"id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement(u"summary", item['description'], {u"type": u"html"})
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement(u"link", '',
{u"rel": u"enclosure",
u"href": item['enclosure'].url,
u"length": item['enclosure'].length,
u"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement(u"category", u"", {u"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement(u"rights", item['item_copyright'])
| bsd-3-clause | -3,889,700,314,081,301,500 | 41.653659 | 123 | 0.613564 | false | 3.669324 | false | false | false |
riklaunim/django-custom-multisite | django/contrib/auth/models.py | 1 | 17160 | import urllib
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import send_mail
from django.db import models
from django.db.models.manager import EmptyManager
from django.utils.crypto import get_random_string
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib import auth
# UNUSABLE_PASSWORD is still imported here for backwards compatibility
from django.contrib.auth.hashers import (
check_password, make_password, is_password_usable, UNUSABLE_PASSWORD)
from django.contrib.auth.signals import user_logged_in
from django.contrib.contenttypes.models import ContentType
from audioapp.apps import multisite
def update_last_login(sender, user, **kwargs):
"""
A signal receiver which updates the last_login date for
the user logging in.
"""
user.last_login = timezone.now()
user.save()
user_logged_in.connect(update_last_login)
class SiteProfileNotAvailable(Exception):
pass
class PermissionManager(models.Manager):
def get_by_natural_key(self, codename, app_label, model):
return self.get(
codename=codename,
content_type=ContentType.objects.get_by_natural_key(app_label,
model),
)
class Permission(models.Model):
"""
The permissions system provides a way to assign permissions to specific
users and groups of users.
The permission system is used by the Django admin site, but may also be
useful in your own code. The Django admin site uses permissions as follows:
- The "add" permission limits the user's ability to view the "add" form
and add an object.
- The "change" permission limits a user's ability to view the change
list, view the "change" form and change an object.
- The "delete" permission limits the ability to delete an object.
Permissions are set globally per type of object, not per specific object
instance. It is possible to say "Mary may change news stories," but it's
not currently possible to say "Mary may change news stories, but only the
ones she created herself" or "Mary may only change news stories that have a
certain status or publication date."
Three basic permissions -- add, change and delete -- are automatically
created for each Django model.
"""
name = models.CharField(_('name'), max_length=50)
content_type = models.ForeignKey(ContentType)
codename = models.CharField(_('codename'), max_length=100)
objects = PermissionManager()
class Meta:
verbose_name = _('permission')
verbose_name_plural = _('permissions')
unique_together = (('content_type', 'codename'),)
ordering = ('content_type__app_label', 'content_type__model',
'codename')
def __unicode__(self):
return u"%s | %s | %s" % (
unicode(self.content_type.app_label),
unicode(self.content_type),
unicode(self.name))
def natural_key(self):
return (self.codename,) + self.content_type.natural_key()
natural_key.dependencies = ['contenttypes.contenttype']
class GroupManager(models.Manager):
"""
The manager for the auth's Group model.
"""
def get_by_natural_key(self, name):
return self.get(name=name)
class Group(models.Model):
"""
Groups are a generic way of categorizing users to apply permissions, or
some other label, to those users. A user can belong to any number of
groups.
A user in a group automatically has all the permissions granted to that
group. For example, if the group Site editors has the permission
can_edit_home_page, any user in that group will have that permission.
Beyond permissions, groups are a convenient way to categorize users to
apply some label, or extended functionality, to them. For example, you
could create a group 'Special users', and you could write code that would
do special things to those users -- such as giving them access to a
members-only portion of your site, or sending them members-only email
messages.
"""
name = models.CharField(_('name'), max_length=80, unique=True)
permissions = models.ManyToManyField(Permission,
verbose_name=_('permissions'), blank=True)
objects = GroupManager()
class Meta:
verbose_name = _('group')
verbose_name_plural = _('groups')
def __unicode__(self):
return self.name
def natural_key(self):
return (self.name,)
class UserManager(multisite.CurrentSiteManager, models.Manager):
@classmethod
def normalize_email(cls, email):
"""
Normalize the address by lowercasing the domain part of the email
address.
"""
email = email or ''
try:
email_name, domain_part = email.strip().rsplit('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
return email
def create_user(self, username, email=None, password=None):
"""
Creates and saves a User with the given username, email and password.
"""
now = timezone.now()
if not username:
raise ValueError('The given username must be set')
email = UserManager.normalize_email(email)
user = self.model(username=username, email=email,
is_staff=False, is_active=True, is_superuser=False,
last_login=now, date_joined=now)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, email, password):
u = self.create_user(username, email, password)
u.is_staff = True
u.is_active = True
u.is_superuser = True
u.save(using=self._db)
return u
def make_random_password(self, length=10,
allowed_chars='abcdefghjkmnpqrstuvwxyz'
'ABCDEFGHJKLMNPQRSTUVWXYZ'
'23456789'):
"""
Generates a random password with the given length and given
allowed_chars. Note that the default value of allowed_chars does not
have "I" or "O" or letters and digits that look similar -- just to
avoid confusion.
"""
return get_random_string(length, allowed_chars)
def get_by_natural_key(self, username):
return self.get(username=username)
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_all_permissions"):
if obj is not None:
permissions.update(backend.get_all_permissions(user, obj))
else:
permissions.update(backend.get_all_permissions(user))
return permissions
def _user_has_perm(user, perm, obj):
anon = user.is_anonymous()
active = user.is_active
for backend in auth.get_backends():
if anon or active or backend.supports_inactive_user:
if hasattr(backend, "has_perm"):
if obj is not None:
if backend.has_perm(user, perm, obj):
return True
else:
if backend.has_perm(user, perm):
return True
return False
def _user_has_module_perms(user, app_label):
anon = user.is_anonymous()
active = user.is_active
for backend in auth.get_backends():
if anon or active or backend.supports_inactive_user:
if hasattr(backend, "has_module_perms"):
if backend.has_module_perms(user, app_label):
return True
return False
class User(multisite.MultiSitesMixin, multisite.SiteFieldMixin,
models.Model):
"""
Users within the Django authentication system are represented by this
model.
Username and password are required. Other fields are optional.
"""
username = models.CharField(_('username'), max_length=30,
help_text=_('Required. 30 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'))
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('e-mail address'), blank=True)
password = models.CharField(_('password'), max_length=128)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
is_superuser = models.BooleanField(_('superuser status'), default=False,
help_text=_('Designates that this user has all permissions without '
'explicitly assigning them.'))
last_login = models.DateTimeField(_('last login'), default=timezone.now)
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
groups = models.ManyToManyField(Group, verbose_name=_('groups'),
blank=True, help_text=_('The groups this user belongs to. A user will '
'get all permissions granted to each of '
'his/her group.'))
user_permissions = models.ManyToManyField(Permission,
verbose_name=_('user permissions'), blank=True,
help_text='Specific permissions for this user.')
objects = UserManager()
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
unique_together = ('username', 'site')
def __unicode__(self):
return self.username
def natural_key(self):
return (self.username,)
def get_absolute_url(self):
return "/users/%s/" % urllib.quote(smart_str(self.username))
def is_anonymous(self):
"""
Always returns False. This is a way of comparing User objects to
anonymous users.
"""
return False
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return True
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = u'%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def set_password(self, raw_password):
self.password = make_password(raw_password)
def check_password(self, raw_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
"""
def setter(raw_password):
self.set_password(raw_password)
self.save()
return check_password(raw_password, self.password, setter)
def set_unusable_password(self):
# Sets a value that will never be a valid hash
self.password = make_password(None)
def has_usable_password(self):
return is_password_usable(self.password)
def get_group_permissions(self, obj=None):
"""
Returns a list of permission strings that this user has through his/her
groups. This method queries all available auth backends. If an object
is passed in, only permissions matching this object are returned.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
if obj is not None:
permissions.update(backend.get_group_permissions(self,
obj))
else:
permissions.update(backend.get_group_permissions(self))
return permissions
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj)
def has_perm(self, perm, obj=None):
"""
Returns True if the user has the specified permission. This method
queries all available auth backends, but returns immediately if any
backend returns True. Thus, a user who has permission from a single
auth backend is assumed to have permission in general. If an object is
provided, permissions for this specific object are checked.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
def has_perms(self, perm_list, obj=None):
"""
Returns True if the user has each of the specified permissions. If
object is passed, it checks if the user has all required perms for this
object.
"""
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, app_label):
"""
Returns True if the user has any permissions in the given app label.
Uses pretty much the same logic as has_perm, above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
def email_user(self, subject, message, from_email=None):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email])
def get_profile(self):
"""
Returns site-specific profile for this user. Raises
SiteProfileNotAvailable if this site does not allow profiles.
"""
if not hasattr(self, '_profile_cache'):
from django.conf import settings
if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
raise SiteProfileNotAvailable(
'You need to set AUTH_PROFILE_MODULE in your project '
'settings')
try:
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
except ValueError:
raise SiteProfileNotAvailable(
'app_label and model_name should be separated by a dot in '
'the AUTH_PROFILE_MODULE setting')
try:
model = models.get_model(app_label, model_name)
if model is None:
raise SiteProfileNotAvailable(
'Unable to load the profile model, check '
'AUTH_PROFILE_MODULE in your project settings')
self._profile_cache = model._default_manager.using(
self._state.db).get(user__id__exact=self.id)
self._profile_cache.user = self
except (ImportError, ImproperlyConfigured):
raise SiteProfileNotAvailable
return self._profile_cache
class AnonymousUser(object):
id = None
username = ''
is_staff = False
is_active = False
is_superuser = False
_groups = EmptyManager()
_user_permissions = EmptyManager()
def __init__(self):
pass
def __unicode__(self):
return 'AnonymousUser'
def __str__(self):
return unicode(self).encode('utf-8')
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return 1 # instances always return the same hash value
def save(self):
raise NotImplementedError
def delete(self):
raise NotImplementedError
def set_password(self, raw_password):
raise NotImplementedError
def check_password(self, raw_password):
raise NotImplementedError
def _get_groups(self):
return self._groups
groups = property(_get_groups)
def _get_user_permissions(self):
return self._user_permissions
user_permissions = property(_get_user_permissions)
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj=obj)
def has_perm(self, perm, obj=None):
return _user_has_perm(self, perm, obj=obj)
def has_perms(self, perm_list, obj=None):
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, module):
return _user_has_module_perms(self, module)
def is_anonymous(self):
return True
def is_authenticated(self):
return False
| bsd-3-clause | 6,168,093,177,801,387,000 | 34.824635 | 79 | 0.617599 | false | 4.4641 | false | false | false |
caktus/django-opendebates | opendebates/tests/test_flatpage_metadata_override.py | 1 | 2796 | from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase
from django.utils.html import escape
from opendebates.models import FlatPageMetadataOverride
from opendebates import site_defaults
from .factories import SiteFactory, DebateFactory
class FlatPageTest(TestCase):
def setUp(self):
self.site = SiteFactory()
self.debate = DebateFactory(site=self.site)
self.page1_content = 'About the site'
self.page1 = FlatPage(url='/{}/about/'.format(self.debate.prefix),
title='About',
content=self.page1_content)
self.page1.save()
self.page1.sites.add(self.site)
self.page2_content = '[An embedded video]'
self.page2 = FlatPage(url='/{}/watch/'.format(self.debate.prefix),
title='Watch Now!',
content=self.page2_content)
self.page2.save()
self.page2.sites.add(self.site)
FlatPageMetadataOverride(page=self.page2).save()
def tearDown(self):
Site.objects.clear_cache()
def test_metadata_not_overridden(self):
rsp = self.client.get(self.page1.url)
self.assertContains(rsp, self.page1_content)
self.assertContains(rsp, escape(site_defaults.FACEBOOK_SITE_TITLE))
self.assertContains(rsp, escape(site_defaults.FACEBOOK_SITE_DESCRIPTION))
self.assertContains(rsp, escape(site_defaults.FACEBOOK_IMAGE))
def test_default_metadata_overrides(self):
rsp = self.client.get(self.page2.url)
self.assertContains(rsp, self.page2_content)
self.assertNotContains(rsp, escape(site_defaults.FACEBOOK_SITE_TITLE))
self.assertNotContains(rsp, escape(site_defaults.FACEBOOK_SITE_DESCRIPTION))
self.assertNotContains(rsp, escape(site_defaults.FACEBOOK_IMAGE))
self.assertNotContains(rsp, escape(site_defaults.TWITTER_IMAGE))
self.assertContains(rsp, escape(site_defaults.FLATPAGE_FACEBOOK_TITLE))
self.assertContains(rsp, escape(site_defaults.FLATPAGE_FACEBOOK_DESCRIPTION))
self.assertContains(rsp, escape(site_defaults.FLATPAGE_FACEBOOK_IMAGE))
self.assertContains(rsp, escape(site_defaults.FLATPAGE_TWITTER_IMAGE))
def test_custom_metadata_overrides(self):
FlatPageMetadataOverride(
page=self.page1,
facebook_title='Foo! Foo! Foo!',
twitter_description='lorem ipsum dolor sit amet').save()
rsp = self.client.get(self.page1.url)
self.assertContains(rsp, escape('Foo! Foo! Foo!'))
self.assertContains(rsp, escape('lorem ipsum dolor sit amet'))
self.assertContains(rsp, escape(site_defaults.FLATPAGE_TWITTER_IMAGE))
| apache-2.0 | -2,610,047,757,008,014,300 | 44.836066 | 85 | 0.675966 | false | 3.814461 | true | false | false |
patrickm/chromium.src | chrome/common/extensions/docs/server2/future.py | 1 | 1238 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
_no_value = object()
def Collect(futures):
'''Creates a Future which returns a list of results from each Future in
|futures|.
'''
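  # e.g. Collect([Future(value=1), Future(callback=lambda: 2)]).Get() == [1, 2];
  # evaluation of each callback is deferred until Get() is called.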
return Future(callback=lambda: [f.Get() for f in futures])
class Future(object):
'''Stores a value, error, or callback to be used later.
'''
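  # Exactly one of |value|, |callback| or |exc_info| must be supplied; a
  # callback runs lazily on the first Get() and its result (or the exception
  # it raised) is cached for later calls.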
def __init__(self, value=_no_value, callback=None, exc_info=None):
self._value = value
self._callback = callback
self._exc_info = exc_info
if (self._value is _no_value and
self._callback is None and
self._exc_info is None):
raise ValueError('Must have either a value, error, or callback.')
def Get(self):
'''Gets the stored value, error, or callback contents.
'''
if self._value is not _no_value:
return self._value
if self._exc_info is not None:
self._Raise()
try:
self._value = self._callback()
return self._value
except:
self._exc_info = sys.exc_info()
self._Raise()
def _Raise(self):
exc_info = self._exc_info
raise exc_info[0], exc_info[1], exc_info[2]
| bsd-3-clause | -2,560,667,322,118,759,400 | 26.511111 | 73 | 0.638934 | false | 3.619883 | false | false | false |
cliali/py2 | a.py | 1 | 1578 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Simple Bot to reply to Telegram messages. This is built on the API wrapper, see
# echobot2.py to see the same example built on the telegram.ext bot framework.
# This program is dedicated to the public domain under the CC0 license.
import logging
import telegram
from telegram.error import NetworkError, Unauthorized
from time import sleep
update_id = None
def main():
global update_id
# Telegram Bot Authorization Token
bot = telegram.Bot('277679081:AAGk3IXlId9PKUn3n_5wrfrUIR_mgsUVCeE')
# get the first pending update_id, this is so we can skip over it in case
# we get an "Unauthorized" exception.
try:
update_id = bot.getUpdates()[0].update_id
except IndexError:
update_id = None
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
while True:
try:
echo(bot)
except NetworkError:
sleep(1)
except Unauthorized:
# The user has removed or blocked the bot.
update_id += 1
def echo(bot):
global update_id
# Request updates after the last update_id
for update in bot.getUpdates(offset=update_id, timeout=10):
        update_id = update.update_id + 1
        if update.message:  # your bot can receive updates without messages
            # chat_id is required to reply to any message
            chat_id = update.message.chat_id
            # Reply to the message
            update.message.reply_text(update.message.text)
if __name__ == '__main__':
main()
| apache-2.0 | 2,133,397,267,722,731,300 | 28.773585 | 86 | 0.653359 | false | 3.793269 | false | false | false |
flavoi/diventi | diventi/landing/views.py | 1 | 3927 | from itertools import chain
from django.shortcuts import render, redirect, resolve_url
from django.views.generic.detail import DetailView
from django.views.generic import ListView, TemplateView
from django.views.generic.edit import CreateView
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.http import Http404
from diventi.accounts.models import DiventiUser
from diventi.accounts.forms import DiventiUserInitForm
from diventi.products.models import Product
from diventi.blog.models import Article
from diventi.feedbacks.models import Survey, Answer
from diventi.core.views import StaffRequiredMixin
from .models import (
Section,
AboutArticle,
PolicyArticle,
)
class LandingSearchView(ListView):
""" Search for every content in the project. """
template_name = "landing/search_results_quick.html"
context_object_name = 'results'
model = Section
def get_queryset(self):
results = super(LandingSearchView, self).get_queryset()
query = self.request.GET.get('q')
if query:
articles = Article.search(self, query)
products = Product.search(self, query)
users = DiventiUser.search(self, query)
results = list(chain(products, articles, users))
else:
results = None
return results
def get_context_data(self, **kwargs):
context = super(LandingSearchView, self).get_context_data(**kwargs)
context['search_query'] = self.request.GET.get('q')
return context
class DashboardView(StaffRequiredMixin, ListView):
""" Report relevant piece of contents of any supported app. """
template_name = "landing/analytics_quick.html"
context_object_name = 'results'
model = Section
def get_queryset(self):
results = super(DashboardView, self).get_queryset()
articles = Article.reporting(self)
products = Product.reporting(self)
users = DiventiUser.reporting(self)
        results = list(chain(users, articles, products))
return results
def get_context_data(self, **kwargs):
context = super(DashboardView, self).get_context_data(**kwargs)
featured_section = Section.objects.featured()
context['featured_section'] = featured_section
return context
def get_landing_context(request):
sections = Section.objects.not_featured()
featured_section = Section.objects.featured()
if featured_section:
pass
elif sections.exists():
featured_section = sections.first()
sections = sections.exclude(id=featured_section.id)
else:
return HttpResponseNotFound(_('This page is not available yet.'))
context = {
'sections': sections,
'featured_section': featured_section,
}
return context
class LandingTemplateView(TemplateView):
""" Renders the landing page with all necessary context. """
template_name = "landing/landing_quick.html"
def get_context_data(self, **kwargs):
context = super(LandingTemplateView, self).get_context_data(**kwargs)
landing_context = get_landing_context(self.request)
context = {**context, **landing_context} # Merge the two dictionaries
return context
class AboutArticleDetailView(DetailView):
""" Renders the 'about us' article and the content related to it. """
model = AboutArticle
template_name = "landing/about_article_quick.html"
class PolicyArticleDetailView(DetailView):
""" Renders the policy article and the content related to it. """
model = PolicyArticle
template_name = "landing/about_article_quick.html"
| apache-2.0 | 2,472,953,982,382,659,600 | 30.725 | 77 | 0.675325 | false | 4.16879 | false | false | false |
Uberi/botty-bot-bot-bot | src/plugins/timezones.py | 1 | 5164 | #!/usr/bin/env python3
import re
from datetime import datetime, date
import pytz
from .utilities import BasePlugin
from .utilities import clockify, untag_word
timezone_abbreviations = {
"est": pytz.timezone("Canada/Eastern"),
"edt": pytz.timezone("Canada/Eastern"),
"atlantic": pytz.timezone("Canada/Eastern"),
"eastern": pytz.timezone("Canada/Eastern"),
"toronto": pytz.timezone("Canada/Eastern"),
"waterloo": pytz.timezone("Canada/Eastern"),
"ontario": pytz.timezone("Canada/Eastern"),
"ny": pytz.timezone("US/Eastern"),
"pst": pytz.timezone("Canada/Pacific"),
"vancouver": pytz.timezone("Canada/Pacific"),
"pacific": pytz.timezone("US/Pacific-New"),
"sf": pytz.timezone("US/Pacific-New"),
"la": pytz.timezone("US/Pacific-New"),
"california": pytz.timezone("US/Pacific-New"),
}
other_timezones = (
("toronto", pytz.timezone("Canada/Eastern")),
("vancouver", pytz.timezone("Canada/Pacific")),
("utc", pytz.utc),
)
class TimezonesPlugin(BasePlugin):
"""
Timezone conversion plugin for Botty.
Example invocations:
#general | Me: 4pm local
#general | Botty: *EASTERN DAYLIGHT TIME* (Μe's local time) :clock4: 16:00 :point_right: *TORONTO* :clock4: 16:00 - *VANCOUVER* :clock1: 13:00 - *UTC* :clock8: 20:00
#general | Me: 6:23pm pst
#general | Botty: *PST* :clock630: 18:23 :point_right: *TORONTO* :clock930: 21:23 - *VANCOUVER* :clock630: 18:23 - *UTC* :clock130: 1:23 (tomorrow)
#general | Me: 6:23 here
#general | Botty: *EASTERN DAYLIGHT TIME* (Μe's local time) :clock630: 6:23 :point_right: *TORONTO* :clock630: 6:23 - *VANCOUVER* :clock330: 3:23 - *UTC* :clock1030: 10:23
#general | Me: 8pm toronto
#general | Botty: *TORONTO* :clock8: 20:00 :point_right: *TORONTO* :clock8: 20:00 - *VANCOUVER* :clock5: 17:00 - *UTC* :clock12: 0:00 (tomorrow)
"""
def __init__(self, bot):
super().__init__(bot)
def on_message(self, m):
if not m.is_user_text_message: return False
match = re.search(r"\b(\d\d?)(?::(\d\d))?(?:\s*(am|pm))?\s+(\w+)", m.text, re.IGNORECASE)
if not match: return False
# get time of day
if not match.group(2) and not match.group(3): return False # ignore plain numbers like "4 potato"
hour = int(match.group(1))
minute = 0 if match.group(2) is None else int(match.group(2))
if not (0 <= hour <= 23) or not (0 <= minute <= 59): return False
if match.group(3) is not None and match.group(3).lower() == "pm":
if not (1 <= hour <= 12): return False
hour = (hour % 12) + 12
today = date.today()
naive_timestamp = datetime(today.year, today.month, today.day, hour, minute)
timezone_name = match.group(4)
# get timezone and localized timestamp
if timezone_name.lower() in timezone_abbreviations: # use the specified timezone
timezone = timezone_abbreviations[timezone_name.lower()]
timezone_is_from_user_info = False
elif timezone_name.lower() in {"local", "here"}: # use the user's local timezone, specified in their profile
user_info = self.get_user_info_by_id(m.user_id)
try:
timezone = pytz.timezone(user_info.get("tz"))
except: # user does not have a valid timezone
return False
timezone_name = user_info.get("tz_label")
timezone_is_from_user_info = True
else:
return False
timestamp = timezone.localize(naive_timestamp)
# perform timezone conversions
timezone_conversions = []
for other_timezone_name, other_timezone in other_timezones:
converted_timestamp = timestamp.astimezone(other_timezone)
if converted_timestamp.date() > timestamp.date():
timezone_conversions.append("*{}* :{}: {}:{:>02} (tomorrow)".format(other_timezone_name.upper(), clockify(converted_timestamp), converted_timestamp.hour, converted_timestamp.minute))
elif converted_timestamp.date() < timestamp.date():
timezone_conversions.append("*{}* :{}: {}:{:>02} (yesterday)".format(other_timezone_name.upper(), clockify(converted_timestamp), converted_timestamp.hour, converted_timestamp.minute))
else:
timezone_conversions.append("*{}* :{}: {}:{:>02}".format(other_timezone_name.upper(), clockify(converted_timestamp), converted_timestamp.hour, converted_timestamp.minute))
if timezone_is_from_user_info:
selected_time = "(timezone from {}'s profile) *{}* :{}: {}:{:>02}".format(untag_word(self.get_user_name_by_id(m.user_id)), timezone_name.upper(), clockify(timestamp), timestamp.hour, timestamp.minute)
else:
selected_time = "*{}* :{}: {}:{:>02}".format(timezone_name.upper(), clockify(timestamp), timestamp.hour, timestamp.minute)
self.respond_raw("{} :point_right: {}".format(selected_time, " - ".join(timezone_conversions)))
return True
| mit | 1,056,715,963,307,125,400 | 49.607843 | 212 | 0.610616 | false | 3.418543 | false | false | false |
muttiopenbts/fusion | fusion_level02_4.py | 1 | 1932 | #!/usr/bin/python
'''
Simple script to interact with the fusion level 02 challenge network daemon.
[email protected]
'''
from pwn import *
import sys
#Use this hexdump lib because pwntools hexdump is too slow
from hexdump import *
def doMode(mode): # Either E or Q
print 'Sending mode call: {}'.format(mode)
#Specify encryption function
io.send(mode)
def doEncryption(message, fake_message_size=None):
doMode('E')
if fake_message_size is not None:
message_size = fake_message_size
else:
message_size = len(message)
#Specify message size as little endian 8(d) = \x08\x00\x00\x00
encryption_size_bytes = p32(message_size) #Use p32, p64, or pack
print 'Sending message size as bytes\n{}'.format(encryption_size_bytes.encode('hex'))
    print 'Sending message size as integer\n{}'.format(unpack(encryption_size_bytes))
#Specify size of message to be encrypted
io.send(encryption_size_bytes)
#Generate message and send
print 'Sending message\n{}'.format(hexdump(message))
io.send(message)
data = io.recvregex('your file --]\n')
log.info(data)
#Server sends message size as 4 bytes little endian
data = io.recvn(4)
log.info('Received encrypted message size as bytes\n{}'.format(data.encode('hex')))
log.info('Size in integer\n{}'.format(unpack(data)))
encrypted_message = io.recvn(message_size)
log.info('Received encrypted message\n{}'.format(hexdump(encrypted_message)))
return encrypted_message
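# Wire protocol recap (comment added for clarity): the client sends 'E', then
# the plaintext length as a 4-byte little-endian integer, then the raw bytes;
# the server replies with a banner, the ciphertext length (4 bytes LE), and
# the XOR-encrypted payload, which doEncryption() returns.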
if __name__ == "__main__":
host = sys.argv[1]
port = sys.argv[2]
io = remote(host,int(port))
#size = 32*4096 # No crash
# xor key is 32*4 = 128 bytes
message_size = 32*4096+100 # crash
message = cyclic(message_size) #Generate unique string to help determin payload register overwrite
xor_message = doEncryption(message)
message = doEncryption(xor_message)
doMode('Q')
| gpl-3.0 | 536,043,605,942,736,000 | 36.153846 | 102 | 0.683747 | false | 3.551471 | false | false | false |
xiaonanln/myleetcode-python | src/Sudoku Solver.py | 1 | 2697 | class Solution:
# @param board, a 9x9 2D array
# Solve the Sudoku by modifying the input board in-place.
# Do not return any value.
def solveSudoku(self, board):
rowUsable = [set(xrange(1, 10)) for i in xrange(9)]
colUsable = [set(xrange(1, 10)) for i in xrange(9)]
blockUsable = [set(xrange(1, 10)) for i in xrange(9)]
__board = board
board = [ [ int(c) if c != '.' else None for c in row ] for row in board]
for row in xrange(9):
boardrow = board[row]
for col in xrange(9):
n = boardrow[col]
if n is None: continue
rowUsable[row].remove(n)
colUsable[col].remove(n)
blockindex = (row // 3) * 3 + (col // 3)
blockUsable[blockindex].remove(n)
self.rowUsable = rowUsable
self.colUsable = colUsable
self.blockUsable = blockUsable
r, c = 0, 0
self.solve(board, r, c)
for i, row in enumerate(board):
__board[i] = ''.join( str(n) for n in row)
def solve(self, board, r, c):
if c == 9:
c = 0
r += 1
if r == 9:
return True
if board[r][c] is None:
bi = (r // 3) * 3 + (c // 3)
usable = self.rowUsable[r] & self.colUsable[c] & self.blockUsable[bi]
# if r == 1: print self.rowUsable[1], usable
for n in usable:
# if r == 1: print 'using', n
board[r][c] = n
self.rowUsable[r].remove(n)
self.colUsable[c].remove(n)
self.blockUsable[bi].remove(n)
if self.solve(board, r, c+1): return True
board[r][c] = None
self.rowUsable[r].add(n)
self.colUsable[c].add(n)
self.blockUsable[bi].add(n)
return False
else:
return self.solve(board, r, c + 1)
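# Approach (summary comment added for clarity): row-major backtracking where
# rowUsable/colUsable/blockUsable track the digits still available per row,
# column, and 3x3 block, so each candidate set is a cheap set intersection.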
E = '.'
# board = [
# [5, 3, E, E, 7, E, E, E, E],
# [6, E, E, 1, 9, 5, E, E, E],
# [E, 9, 8, E, E, E, E, 6, E],
# [8, E, E, E, 6, E, E, E, 3],
# [4, E, E, 8, E, 3, E, E, 1],
# [7, E, E, E, 2, E, E, E, 6],
# [E, 6, E, E, E, E, 2, 8, E],
# [E, E, E, 4, 1, 9, E, E, 5],
# [E, E, E, E, 8, E, E, 7, 9],
# ]
board = ["..9748...","7........",".2.1.9...","..7...24.",".64.1.59.",".98...3..","...8.3.2.","........6","...2759.."]
Solution().solveSudoku(board)
print '\n'.join(board) | apache-2.0 | 8,919,941,869,076,930,000 | 31.119048 | 118 | 0.411568 | false | 3.082286 | false | false | false |
kennethreitz/pipenv | pipenv/patched/notpip/_internal/network/download.py | 1 | 6458 | """Download files with progress indicators.
"""
import cgi
import logging
import mimetypes
import os
from pipenv.patched.notpip._vendor import requests
from pipenv.patched.notpip._vendor.requests.models import CONTENT_CHUNK_SIZE
from pipenv.patched.notpip._internal.models.index import PyPI
from pipenv.patched.notpip._internal.network.cache import is_from_cache
from pipenv.patched.notpip._internal.network.utils import response_chunks
from pipenv.patched.notpip._internal.utils.misc import (
format_size,
redact_auth_from_url,
splitext,
)
from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING
from pipenv.patched.notpip._internal.utils.ui import DownloadProgressProvider
if MYPY_CHECK_RUNNING:
from typing import Iterable, Optional
from pipenv.patched.notpip._vendor.requests.models import Response
from pipenv.patched.notpip._internal.models.link import Link
from pipenv.patched.notpip._internal.network.session import PipSession
logger = logging.getLogger(__name__)
def _get_http_response_size(resp):
# type: (Response) -> Optional[int]
try:
return int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
return None
def _prepare_download(
resp, # type: Response
link, # type: Link
progress_bar # type: str
):
# type: (...) -> Iterable[bytes]
total_length = _get_http_response_size(resp)
if link.netloc == PyPI.file_storage_domain:
url = link.show_url
else:
url = link.url_without_fragment
logged_url = redact_auth_from_url(url)
if total_length:
logged_url = '{} ({})'.format(logged_url, format_size(total_length))
if is_from_cache(resp):
logger.info("Using cached %s", logged_url)
else:
logger.info("Downloading %s", logged_url)
if logger.getEffectiveLevel() > logging.INFO:
show_progress = False
elif is_from_cache(resp):
show_progress = False
elif not total_length:
show_progress = True
elif total_length > (40 * 1000):
show_progress = True
else:
show_progress = False
chunks = response_chunks(resp, CONTENT_CHUNK_SIZE)
if not show_progress:
return chunks
return DownloadProgressProvider(
progress_bar, max=total_length
)(chunks)
def sanitize_content_filename(filename):
# type: (str) -> str
"""
Sanitize the "filename" value from a Content-Disposition header.
"""
return os.path.basename(filename)
def parse_content_disposition(content_disposition, default_filename):
# type: (str, str) -> str
"""
Parse the "filename" value from a Content-Disposition header, and
return the default filename if the result is empty.
"""
_type, params = cgi.parse_header(content_disposition)
filename = params.get('filename')
if filename:
# We need to sanitize the filename to prevent directory traversal
# in case the filename contains ".." path parts.
filename = sanitize_content_filename(filename)
return filename or default_filename
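# Illustrative example (added; not part of pip's source):
# parse_content_disposition(
#     'attachment; filename="../../pip-20.0.tar.gz"', 'fallback.tar.gz')
# returns 'pip-20.0.tar.gz' -- the "../" path parts are stripped by
# sanitize_content_filename, defeating directory-traversal attempts.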
def _get_http_response_filename(resp, link):
# type: (Response, Link) -> str
"""Get an ideal filename from the given HTTP response, falling back to
the link filename if not provided.
"""
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
filename = parse_content_disposition(content_disposition, filename)
ext = splitext(filename)[1] # type: Optional[str]
if not ext:
ext = mimetypes.guess_extension(
resp.headers.get('content-type', '')
)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
return filename
def _http_get_download(session, link):
# type: (PipSession, Link) -> Response
target_url = link.url.split('#', 1)[0]
resp = session.get(
target_url,
# We use Accept-Encoding: identity here because requests
# defaults to accepting compressed responses. This breaks in
# a variety of ways depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible
# file and will leave the file alone and with an empty
# Content-Encoding
# - Some servers will notice that the file is already
# compressed and will leave the file alone and will add a
# Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take
# a file that's already been compressed and compress it again
# and set the Content-Encoding: gzip header
# By setting this to request only the identity encoding We're
# hoping to eliminate the third case. Hopefully there does not
# exist a server which when given a file will notice it is
# already compressed and that you're not asking for a
# compressed file and will then decompress it before sending
# because if that's the case I don't think it'll ever be
# possible to make this work.
headers={"Accept-Encoding": "identity"},
stream=True,
)
resp.raise_for_status()
return resp
class Download(object):
def __init__(
self,
response, # type: Response
filename, # type: str
chunks, # type: Iterable[bytes]
):
# type: (...) -> None
self.response = response
self.filename = filename
self.chunks = chunks
class Downloader(object):
def __init__(
self,
session, # type: PipSession
progress_bar, # type: str
):
# type: (...) -> None
self._session = session
self._progress_bar = progress_bar
def __call__(self, link):
# type: (Link) -> Download
try:
resp = _http_get_download(self._session, link)
except requests.HTTPError as e:
logger.critical(
"HTTP error %s while getting %s", e.response.status_code, link
)
raise
return Download(
resp,
_get_http_response_filename(resp, link),
_prepare_download(resp, link, self._progress_bar),
)
| mit | -2,368,371,102,283,550,700 | 31.29 | 78 | 0.644162 | false | 4.084756 | false | false | false |
stefanopanella/xapi-storage-plugins | libs/losetup.py | 1 | 1260 | import os.path
from xapi.storage.common import call
# Use Linux "losetup" to create block devices from files
class Loop:
"""An active loop device"""
def __init__(self, path, loop):
self.path = path
self.loop = loop
def destroy(self, dbg):
call(dbg, ["losetup", "-d", self.loop])
def block_device(self):
return self.loop
def find(dbg, path):
"""Return the active loop device associated with the given path"""
# The kernel loop driver will transparently follow symlinks, so
# we must too.
path = os.path.realpath(path)
for line in call(dbg, ["losetup", "-a"]).split("\n"):
line = line.strip()
if line != "":
bits = line.split()
loop = bits[0][0:-1]
open_bracket = line.find('(')
close_bracket = line.find(')')
this_path = line[open_bracket + 1:close_bracket]
if this_path == path:
return Loop(path, loop)
return None
def create(dbg, path):
"""Creates a new loop device backed by the given file"""
# losetup will resolve paths and 'find' needs to use string equality
path = os.path.realpath(path)
call(dbg, ["losetup", "-f", path])
return find(dbg, path)
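# --- Usage sketch (added for illustration; not part of the original module).
# `dbg` is only a debug tag passed through to `call`; the backing file is a
# hypothetical path that must already exist, and losetup normally needs root.
if __name__ == "__main__":
    demo_dbg = "losetup-demo"
    demo_loop = create(demo_dbg, "/tmp/disk.img")  # hypothetical backing file
    if demo_loop is not None:
        print(demo_loop.block_device())  # e.g. /dev/loop0
        demo_loop.destroy(demo_dbg)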
| lgpl-2.1 | -1,819,758,216,933,129,700 | 26.391304 | 72 | 0.584127 | false | 3.806647 | false | false | false |
mosarg/gestione_scuola | backend/migrations/0001_initial.py | 1 | 2074 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Backend'
db.create_table(u'backend_backend', (
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('backendId', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('kind', self.gf('django.db.models.fields.CharField')(unique=True, max_length=20)),
('description', self.gf('django.db.models.fields.CharField')(max_length=1000)),
('serverIp', self.gf('django.db.models.fields.GenericIPAddressField')(max_length=39)),
('serverFqdn', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal(u'backend', ['Backend'])
def backwards(self, orm):
# Deleting model 'Backend'
db.delete_table(u'backend_backend')
models = {
u'backend.backend': {
'Meta': {'ordering': "('-modified', '-created')", 'object_name': 'Backend'},
'backendId': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'kind': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'serverFqdn': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'serverIp': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'})
}
}
complete_apps = ['backend'] | gpl-3.0 | -7,381,446,622,908,163,000 | 48.404762 | 125 | 0.607522 | false | 3.79159 | false | false | false |
oarriaga/spatial_transformer_networks | src/models/layers.py | 1 | 5049 | from keras import backend as K
from keras.engine.topology import Layer
if K.backend() == 'tensorflow':
import tensorflow as tf
def K_meshgrid(x, y):
return tf.meshgrid(x, y)
def K_linspace(start, stop, num):
return tf.linspace(start, stop, num)
else:
raise Exception("Only 'tensorflow' is supported as backend")
class BilinearInterpolation(Layer):
"""Performs bilinear interpolation as a keras layer
References
----------
[1] Spatial Transformer Networks, Max Jaderberg, et al.
[2] https://github.com/skaae/transformer_network
[3] https://github.com/EderSantana/seya
"""
def __init__(self, output_size, **kwargs):
self.output_size = output_size
super(BilinearInterpolation, self).__init__(**kwargs)
def get_config(self):
return {
'output_size': self.output_size,
}
def compute_output_shape(self, input_shapes):
height, width = self.output_size
num_channels = input_shapes[0][-1]
return (None, height, width, num_channels)
def call(self, tensors, mask=None):
X, transformation = tensors
output = self._transform(X, transformation, self.output_size)
return output
def _interpolate(self, image, sampled_grids, output_size):
batch_size = K.shape(image)[0]
height = K.shape(image)[1]
width = K.shape(image)[2]
num_channels = K.shape(image)[3]
x = K.cast(K.flatten(sampled_grids[:, 0:1, :]), dtype='float32')
y = K.cast(K.flatten(sampled_grids[:, 1:2, :]), dtype='float32')
x = .5 * (x + 1.0) * K.cast(width, dtype='float32')
y = .5 * (y + 1.0) * K.cast(height, dtype='float32')
x0 = K.cast(x, 'int32')
x1 = x0 + 1
y0 = K.cast(y, 'int32')
y1 = y0 + 1
max_x = int(K.int_shape(image)[2] - 1)
max_y = int(K.int_shape(image)[1] - 1)
x0 = K.clip(x0, 0, max_x)
x1 = K.clip(x1, 0, max_x)
y0 = K.clip(y0, 0, max_y)
y1 = K.clip(y1, 0, max_y)
pixels_batch = K.arange(0, batch_size) * (height * width)
pixels_batch = K.expand_dims(pixels_batch, axis=-1)
flat_output_size = output_size[0] * output_size[1]
base = K.repeat_elements(pixels_batch, flat_output_size, axis=1)
base = K.flatten(base)
# base_y0 = base + (y0 * width)
base_y0 = y0 * width
base_y0 = base + base_y0
# base_y1 = base + (y1 * width)
base_y1 = y1 * width
base_y1 = base_y1 + base
indices_a = base_y0 + x0
indices_b = base_y1 + x0
indices_c = base_y0 + x1
indices_d = base_y1 + x1
flat_image = K.reshape(image, shape=(-1, num_channels))
flat_image = K.cast(flat_image, dtype='float32')
pixel_values_a = K.gather(flat_image, indices_a)
pixel_values_b = K.gather(flat_image, indices_b)
pixel_values_c = K.gather(flat_image, indices_c)
pixel_values_d = K.gather(flat_image, indices_d)
x0 = K.cast(x0, 'float32')
x1 = K.cast(x1, 'float32')
y0 = K.cast(y0, 'float32')
y1 = K.cast(y1, 'float32')
area_a = K.expand_dims(((x1 - x) * (y1 - y)), 1)
area_b = K.expand_dims(((x1 - x) * (y - y0)), 1)
area_c = K.expand_dims(((x - x0) * (y1 - y)), 1)
area_d = K.expand_dims(((x - x0) * (y - y0)), 1)
values_a = area_a * pixel_values_a
values_b = area_b * pixel_values_b
values_c = area_c * pixel_values_c
values_d = area_d * pixel_values_d
return values_a + values_b + values_c + values_d
def _make_regular_grids(self, batch_size, height, width):
# making a single regular grid
x_linspace = K_linspace(-1., 1., width)
y_linspace = K_linspace(-1., 1., height)
x_coordinates, y_coordinates = K_meshgrid(x_linspace, y_linspace)
x_coordinates = K.flatten(x_coordinates)
y_coordinates = K.flatten(y_coordinates)
ones = K.ones_like(x_coordinates)
grid = K.concatenate([x_coordinates, y_coordinates, ones], 0)
# repeating grids for each batch
grid = K.flatten(grid)
grids = K.tile(grid, K.stack([batch_size]))
return K.reshape(grids, (batch_size, 3, height * width))
def _transform(self, X, affine_transformation, output_size):
batch_size, num_channels = K.shape(X)[0], K.shape(X)[3]
transformations = K.reshape(affine_transformation,
shape=(batch_size, 2, 3))
# transformations = K.cast(affine_transformation[:, 0:2, :], 'float32')
regular_grids = self._make_regular_grids(batch_size, *output_size)
sampled_grids = K.batch_dot(transformations, regular_grids)
interpolated_image = self._interpolate(X, sampled_grids, output_size)
new_shape = (batch_size, output_size[0], output_size[1], num_channels)
interpolated_image = K.reshape(interpolated_image, new_shape)
return interpolated_image
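if __name__ == '__main__':
    # Smoke test (added for illustration; not part of the original module).
    # Assumes the Keras 2 functional API. An identity affine transform
    # [1, 0, 0, 0, 1, 0] makes the layer simply resample the input onto the
    # requested output grid.
    import numpy as np
    from keras.layers import Input
    from keras.models import Model

    image = Input(shape=(8, 8, 1))
    theta = Input(shape=(6,))  # flattened 2x3 affine matrix
    warped = BilinearInterpolation((4, 4))([image, theta])
    model = Model(inputs=[image, theta], outputs=warped)

    x = np.random.rand(1, 8, 8, 1).astype('float32')
    identity = np.array([[1., 0., 0., 0., 1., 0.]], dtype='float32')
    print(model.predict([x, identity]).shape)  # -> (1, 4, 4, 1)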
| mit | -3,533,227,027,551,988,700 | 35.854015 | 79 | 0.575956 | false | 3.137974 | false | false | false |
mattgemmell/DOT-MGTextEntry | mgtext.py | 1 | 12149 | #! /usr/bin/python
"""
MGText
Text-entry plugin for Pimoroni's menu system for the Raspberry Pi Display-O-Tron.
Code and info: https://github.com/mattgemmell/DOT-MGTextEntry
By: Matt Gemmell
http://mattgemmell.com/
http://twitter.com/mattgemmell
"""
from dot3k.menu import MenuOption
_UP = 0
_DOWN = 1
_LEFT = 2
_RIGHT = 3
class MGText(MenuOption):
def __init__(self):
self.cols = 16
self.initialized = False
self.scroll_up_icon = chr(0)
self.scroll_down_icon = chr(1)
self.abbreviation_icon = chr(2)
self.placeholder_icon = chr(3)
self.caps_on = True
self.symbols_mode = False
self.cancel_aborts = False # by default, Cancel button acts as Delete
self.selection = {'row': 0, 'option': 0}
self.first_displayed_row = 0
self.uppercase_letters = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
self.lowercase_letters = list('abcdefghijklmnopqrstuvwxyz')
self.space_symbol = 'Spc'
self.line_break = '\n' # for layout only; can't be entered
self.numbers = list('0123456789')
self.quick_punctuation = list('./:@')
self.symbols = list('./:@\'"~+-=_!?,;()[]<>{}\\^|&*$%#`')
self.caps_command = "Caps"
self.symbols_command = "More"
self.delete_command = "Del"
self.cancel_command = "Cancel"
self.commit_command = "Accept"
self.commands = [self.caps_command, self.symbols_command, self.delete_command, self.cancel_command, self.commit_command]
self.uppercase_set = self.uppercase_letters
self.uppercase_set.append(self.space_symbol)
self.uppercase_set.extend(self.numbers)
self.uppercase_set.extend(self.quick_punctuation)
self.uppercase_set.extend(self.commands)
self.lowercase_set = self.lowercase_letters
self.lowercase_set.append(self.space_symbol)
self.lowercase_set.extend(self.numbers)
self.lowercase_set.extend(self.quick_punctuation)
self.lowercase_set.extend(self.commands)
self.symbols_set = self.symbols
self.symbols_set.append(self.line_break)
self.symbols_set.extend(self.commands)
self.confirm_accept = "Yes"
self.confirm_cancel = "No"
self.confirm_quit = "Quit"
self.confirm_set = [self.confirm_accept, self.confirm_cancel, self.confirm_quit]
self.display_map = [] # 2D array of options
self.display_ranges = [] # 2D array of range-arrays with option extents
self.entered_text = ''
self.confirming = False
MenuOption.__init__(self)
self.is_setup = False
def set_value(self, value):
self.entered_text = value
def get_value(self):
return self.entered_text
def begin(self):
self.initialized = False
self.confirming = False
self.symbols_mode = False
self.selection = {'row': 0, 'option': 0}
self.first_displayed_row = 0
self.set_value('')
self.update_display_map()
def setup(self, config):
MenuOption.setup(self, config)
def cleanup(self):
self.entered_text = ''
self.display_map = []
self.display_ranges = []
def update_display_map(self):
"""
Builds two datastructures:
- display_map is an array of rows of the display, with each entry being an array of that row's options as strings.
- display_ranges is similar, but each row-array contains dictionaries that are ranges of where the corresponding option renders on the display.
"""
self.display_map = []
self.display_ranges = []
options_set = self.uppercase_set if self.caps_on else self.lowercase_set
if self.symbols_mode:
options_set = self.symbols_set
if self.confirming:
options_set = self.confirm_set
row_len = 0
self.display_map.append([])
self.display_ranges.append([])
for opt in options_set:
if (opt == self.line_break) or ((len(opt) + row_len + 2) > (self.cols - 1)):
# Start a new row
self.display_map.append([])
self.display_ranges.append([])
row_len = 0
if opt == self.line_break:
# We don't actually include line-breaks as options
continue
# Add to latest row
self.display_map[-1].append(opt)
opt_len = len(opt) + 1 # to account for the leading space
self.display_ranges[-1].append({'start': row_len, 'len': opt_len})
row_len += opt_len
def index_of_range_containing(self, row, col):
"""
This allows us to move the cursor spatially when going to a different row. For example, consider moving from a row with only two lengthy options, to a row with seven single-character options. If option 2 of 2 was selected on the former row, we wouldn't just want option 2 to be selected on the latter row after the move, because the cursor would seem to jump way over to the left. What we really want is to "move to whatever option is directly above/below the one I already had selected", which is what this method (and the display_ranges structure) allows.
"""
if row >= 0 and row < len(self.display_ranges) and col >= 0 and col < self.cols:
row_ranges = self.display_ranges[row]
index = len(row_ranges) - 1
for range in reversed(row_ranges):
if col >= range['start']:
break
index -= 1
return index
def move_cursor(self, direction):
# Move cursor appropriately using ranges
sel_row = self.selection['row']
sel_opt = self.selection['option']
sel_orig_row = sel_row
sel_orig_col = self.display_ranges[sel_row][sel_opt]['start']
if direction == _UP:
self.selection['row'] = (sel_row - 1) % len(self.display_map)
self.selection['option'] = self.index_of_range_containing(self.selection['row'], sel_orig_col)
elif direction == _DOWN:
self.selection['row'] = (sel_row + 1) % len(self.display_map)
self.selection['option'] = self.index_of_range_containing(self.selection['row'], sel_orig_col)
elif direction == _LEFT:
# We wrap back onto the previous row when appropriate
self.selection['option'] = (sel_opt - 1) % len(self.display_map[sel_row])
# Check to see if we wrapped around
if self.selection['option'] > sel_opt or len(self.display_map[sel_row]) == 1:
# Wrap to previous row
self.selection['row'] = (sel_row - 1) % len(self.display_map)
self.selection['option'] = len(self.display_map[self.selection['row']]) - 1
elif direction == _RIGHT:
# We wrap forward onto the next row when appropriate
self.selection['option'] = (sel_opt + 1) % len(self.display_map[sel_row])
# Check to see if we wrapped around
if self.selection['option'] < sel_opt or len(self.display_map[sel_row]) == 1:
# Wrap to next row
self.selection['row'] = (sel_row + 1) % len(self.display_map)
self.selection['option'] = 0
# Sanitise new selection
self.selection['option'] = max(0, self.selection['option'])
self.selection['option'] = min(len(self.display_map[self.selection['row']]) - 1, self.selection['option'])
# Update first_displayed_row appropriately
sel_row = self.selection['row']
if sel_row < self.first_displayed_row:
self.first_displayed_row = sel_row
elif sel_row > self.first_displayed_row + 1:
self.first_displayed_row = sel_row - 1
def render_row(self, row):
# Returns the actual rendered full text of a row, with all annotations
result = ""
if row >= 0 and row < len(self.display_map):
row_opts = self.display_map[row]
row_selected = (self.selection['row'] == row)
selected_option = self.selection['option']
for index, opt in enumerate(row_opts):
# Selection markers
if row_selected:
if selected_option == index:
result += "["
elif selected_option == (index - 1):
result += "]"
else:
result += " "
else:
result += " "
# Option text
if opt == self.caps_command:
if self.caps_on:
result += "lowr"
else:
result += "UPPR"
elif opt == self.symbols_command:
if self.symbols_mode:
if self.caps_on:
result += "ABC1"
else:
result += "abc1"
else:
result += "#+=$"
else:
result += opt
# Special case for end of row
if index == len(row_opts) - 1:
# Selection markers
if row_selected and selected_option == index:
result += "]"
else:
result += " "
# Add any end-of-row padding required
result += (" " * (self.cols - (len(result) + 1)))
# Scroll indicators
if row == self.first_displayed_row and row > 0:
result += self.scroll_up_icon
elif row == (self.first_displayed_row + 1) and row < (len(self.display_map) - 1):
result += self.scroll_down_icon
else:
result += " "
return result
def delete(self):
# Delete last character entered
if (not self.confirming) and len(self.entered_text) > 0:
self.entered_text = self.entered_text[:-1]
def left(self):
self.move_cursor(_LEFT)
return True
def right(self):
self.move_cursor(_RIGHT)
return True
def up(self):
self.move_cursor(_UP)
return True
def down(self):
self.move_cursor(_DOWN)
return True
def cancel(self):
if self.cancel_aborts:
# Confirm quit if we have text
            if len(self.entered_text) > 0:
self.confirming = True
self.update_display_map()
self.selection = {'row': 0, 'option': 1}
self.first_displayed_row = 0
return False
else:
return True
# Delete last character entered
self.delete()
return False
def select(self):
# Handle all the selectable options and commands
opt = self.display_map[self.selection['row']][self.selection['option']]
if opt == self.space_symbol:
self.entered_text += " "
elif opt == self.caps_command:
self.caps_on = not (self.caps_on)
self.symbols_mode = False
self.update_display_map()
self.selection = {'row': 0, 'option': 0}
self.first_displayed_row = 0
elif opt == self.symbols_command:
self.symbols_mode = not (self.symbols_mode)
self.update_display_map()
self.selection = {'row': 0, 'option': 0}
self.first_displayed_row = 0
elif opt == self.delete_command:
self.delete()
elif opt == self.cancel_command:
self.confirming = True
self.update_display_map()
self.selection = {'row': 0, 'option': 1}
self.first_displayed_row = 0
elif opt == self.commit_command:
self.confirming = True
self.update_display_map()
self.selection = {'row': 0, 'option': 1}
self.first_displayed_row = 0
elif opt == self.confirm_accept:
self.confirming = False
self.update_display_map()
self.selection = {'row': 0, 'option': 0}
self.first_displayed_row = 0
return True
elif opt == self.confirm_cancel:
self.confirming = False
self.update_display_map()
self.selection = {'row': 0, 'option': 0}
self.first_displayed_row = 0
elif opt == self.confirm_quit:
self.confirming = False
self.update_display_map()
self.selection = {'row': 0, 'option': 0}
self.first_displayed_row = 0
self.cancel_input = True
return True
else:
self.entered_text += opt
return False
def redraw(self, menu):
if not self.initialized:
menu.lcd.create_char(0, [0, 0, 4, 14, 31, 0, 0, 0]) # scroll up icon
menu.lcd.create_char(1, [0, 0, 0, 31, 14, 4, 0, 0]) # scroll down icon
menu.lcd.create_char(2, [0, 0, 0, 0, 0, 0, 21, 0]) # abbreviation icon
menu.lcd.create_char(3, [0, 0, 0, 0, 0, 0, 0, 28]) # placeholder icon
self.initialized = True
if not self.confirming:
# Output the editing row
text_len = len(self.entered_text)
if text_len > self.cols:
menu.write_row(0, self.abbreviation_icon + self.entered_text[text_len - self.cols + 1:])
else:
menu.write_row(0, self.entered_text + (self.placeholder_icon * (self.cols - text_len)))
# Output relevant two rows
if self.first_displayed_row < len(self.display_map):
menu.write_row(1, self.render_row(self.first_displayed_row))
else:
menu.clear_row(1)
if self.first_displayed_row + 1 < len(self.display_map):
menu.write_row(2, self.render_row(self.first_displayed_row + 1))
else:
menu.clear_row(2)
else:
# Handle the confirmation screen
if len(self.entered_text) > self.cols:
menu.write_option(0, self.entered_text, scroll=True, scroll_repeat=2000)
else:
menu.write_row(0, self.entered_text + (" " * (self.cols - len(self.entered_text))))
menu.write_row(1, 'Confirm?')
menu.write_row(2, self.render_row(self.first_displayed_row))
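# --- Usage sketch (added for illustration; not from the original file) ---
# MGText plugs into Pimoroni's dot3k menu like any other MenuOption; the exact
# Menu signature below is an assumption, e.g.:
# from dot3k.menu import Menu
# menu = Menu(structure={'Enter Text': MGText()}, lcd=lcd)  # `lcd` from dot3k
# The committed string is then available via the instance's get_value().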
| mit | -2,095,706,729,113,539,300 | 30.720627 | 559 | 0.658408 | false | 3.043337 | false | false | false |
saebyn/nwgui | nwgui/gui.py | 1 | 2263 |
import pygame
from nwgui.container import AbsoluteContainer
class AbstractGUI(object):
def __init__(self, game):
raise NotImplementedError
def getGameObject(self):
raise NotImplementedError
def get(self, widgetName):
raise NotImplementedError
def setName(self, name, widget):
raise NotImplementedError
def updateLayers(self):
raise NotImplementedError
def getLayer(self):
raise NotImplementedError
def addSprite(self, widget):
raise NotImplementedError
def setActive(self, widget):
raise NotImplementedError
def setInactive(self, widget):
raise NotImplementedError
def isControlledPosition(self, position):
raise NotImplementedError
class GUI(AbsoluteContainer, AbstractGUI):
def __init__(self, game):
self._game = game
AbsoluteContainer.__init__(self, game.screen.get_width(),
game.screen.get_height(),
self, root=self)
self.image = pygame.Surface((0, 0))
self.active = None
self.names = {}
def getGameObject(self):
return self._game
def get(self, widgetName):
return self.names[widgetName]
def handleEvent(self, event):
AbsoluteContainer.handleEvent(self, event)
def updateLayers(self):
for widget in self.widgets:
widget.updateLayer()
def setParent(self, parent):
raise NotImplementedError
def isActive(self):
return self.active is not None
def setActive(self, widget):
if self.active is not None:
self.active.setInactive()
self.active = widget
def setInactive(self, widget=None):
if self.active == widget or widget is None:
self.active = None
def addSprite(self, sprite):
self._game.addGUISprite(sprite)
def setName(self, name, widget):
self.names[name] = widget
def isControlledPosition(self, position):
for widget in self._game.guiSprites.sprites():
if widget is self:
continue
if widget.rect.collidepoint(position):
return True
return False
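# Usage sketch (comment added for illustration; not part of the original
# module): `game` is expected to expose a pygame `screen` surface, an
# `addGUISprite` method, and a `guiSprites` sprite group, as used above.
# gui = GUI(game)
# gui.handleEvent(event)  # forward pygame events from the main loop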
| gpl-3.0 | -5,101,451,680,991,540,000 | 23.597826 | 66 | 0.613787 | false | 4.744235 | false | false | false |
algorhythms/LeetCode | 673 Number of Longest Increasing Subsequence.py | 1 | 1796 | #!/usr/bin/python3
"""
Given an unsorted array of integers, find the number of longest increasing
subsequence.
Example 1:
Input: [1,3,5,4,7]
Output: 2
Explanation: The two longest increasing subsequence are [1, 3, 4, 7] and
[1, 3, 5, 7].
Example 2:
Input: [2,2,2,2,2]
Output: 5
Explanation: The length of the longest increasing subsequence is 1, and
there are 5 subsequences of length 1, so the output is 5.
Note: Length of the given array will be not exceed 2000 and the answer is
guaranteed to be fit in 32-bit signed int.
"""
from typing import List
class LenCnt:
def __init__(self, l, c):
self.l = l
self.c = c
def __repr__(self):
return repr((self.l, self.c))
class Solution:
def findNumberOfLIS(self, A: List[int]) -> int:
"""
Two pass - 1st pass find the LIS, 2nd pass find the number
Let F[i] be the length of LIS ended at A[i]
"""
if not A:
return 0
n = len(A)
F = [LenCnt(l=1, c=1) for _ in A]
mx = LenCnt(l=1, c=1)
for i in range(1, n):
for j in range(i):
if A[i] > A[j]:
if F[i].l < F[j].l + 1:
F[i].l = F[j].l + 1
F[i].c = F[j].c
elif F[i].l == F[j].l + 1:
F[i].c += F[j].c
if F[i].l > mx.l:
# mx = F[i] error, need deep copy
mx.l = F[i].l
mx.c = F[i].c
elif F[i].l == mx.l:
mx.c += F[i].c
return mx.c
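# Worked trace (comment added for clarity): for A = [1, 3, 5, 4, 7],
# F = [(1,1), (2,1), (3,1), (3,1), (4,2)] as (length, count) pairs, so the
# longest length is 4 and it can be formed in 2 ways.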
if __name__ == "__main__":
assert Solution().findNumberOfLIS([1,1,1,2,2,2,3,3,3]) == 27
assert Solution().findNumberOfLIS([1, 3, 5, 4, 7]) == 2
assert Solution().findNumberOfLIS([2, 2, 2, 2, 2]) == 5
| mit | 3,544,294,024,520,078,000 | 26.630769 | 78 | 0.50167 | false | 3.059625 | false | false | false |
pearu/sympycore | sympycore/functions/algebra.py | 1 | 2063 | """ Implements function ring support.
"""
#
# Author: Pearu Peterson
# Created: April, 2008
#
__all__ = ['FunctionRing']
from ..core import classes, objects, init_module
from ..basealgebra import Verbatim, Algebra
from ..ring import CommutativeRing
init_module.import_heads()
class FunctionRing(CommutativeRing):
""" Base class to functions ring classes.
Use ``Function`` function to construct instances.
"""
argument_algebras = None
nargs = None
@classmethod
def get_value_algebra(cls):
return CommutativeRing
def get_argument_algebra(self, index):
return self.get_value_algebra()
@classmethod
def get_function_algebra(cls):
return classes.OperatorRing
@classmethod
def get_differential_algebra(cls):
return classes.DifferentialRing
@classmethod
def get_predefined_symbols(cls, name):
if name=='D': return D
return
@classmethod
def convert(cls, obj, typeerror=True):
tobj = type(obj)
if tobj is cls:
return obj
if isinstance(obj, cls.get_value_algebra()):
return cls(NUMBER, obj)
return super(CommutativeRing, cls).convert(obj, typeerror=typeerror)
def as_algebra(self, cls, typeerror=True):
if cls is classes.Verbatim:
return self.as_verbatim()
if type(self) is cls:
return self
#if isinstance(self, cls):
# return self.as_verbatim().as_algebra(cls)
if typeerror:
raise TypeError('Cannot convert %s to %s instance' % (type(self).__name__, cls.__name__))
return NotImplemented
def __call__(self, *args, **options):
cls = self.get_value_algebra()
#cls = classes.Calculus
evaluate = options.get('evaluate', True)
if evaluate:
result = self.head.apply(cls, self.data, self, args)
if result is not NotImplemented:
return result
return cls(APPLY, (self, args))
classes.FunctionRing = FunctionRing
| bsd-3-clause | 7,460,060,148,131,743,000 | 25.792208 | 101 | 0.62191 | false | 3.944551 | false | false | false |
NEONScience/NEON-Data-Skills | tutorials/Python/Lidar/lidar-biomass/calc-biomass_py/calc-biomass_py.py | 1 | 20510 | #!/usr/bin/env python
# coding: utf-8
# ---
# syncID: e6ccf19a4b454ca594388eeaa88ebe12
# title: "Calculate Vegetation Biomass from LiDAR Data in Python"
# description: "Learn to calculate the biomass of standing vegetation using a canopy height model data product."
# dateCreated: 2017-06-21
# authors: Tristan Goulden
# contributors: Donal O'Leary
# estimatedTime: 1 hour
# packagesLibraries: numpy, gdal, matplotlib, matplotlib.pyplot, os
# topics: lidar,remote-sensing
# languagesTool: python
# dataProduct: DP1.10098.001, DP3.30015.001,
# code1: https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/tutorials/Python/Lidar/lidar-biomass/calc-biomass_py/calc-biomass_py.ipynb
# tutorialSeries: intro-lidar-py-series
# urlTitle: calc-biomass-py
# ---
# <div id="ds-objectives" markdown="1">
#
# In this tutorial, we will calculate the biomass for a section of the SJER site. We
# will be using the Canopy Height Model discrete LiDAR data product as well as NEON
# field data on vegetation data. This tutorial will calculate Biomass for individual
# trees in the forest.
#
# ### Objectives
# After completing this tutorial, you will be able to:
#
# * Learn how to apply a Gaussian smoothing kernel for high-frequency spatial filtering
# * Apply a watershed segmentation algorithm for delineating tree crowns
# * Calculate biomass predictor variables from a CHM
# * Setup training data for Biomass predictions
# * Apply a Random Forest machine learning approach to calculate biomass
#
#
# ### Install Python Packages
#
# * **numpy**
# * **gdal**
# * **matplotlib**
# * **matplotlib.pyplot**
# * **os**
#
#
# ### Download Data
#
# If you have already downloaded the data set for the Data Institute, you have the
# data for this tutorial within the SJER directory. If you would like to just
# download the data for this tutorial use the following link.
#
# <a href="https://neondata.sharefile.com/d-s58db39240bf49ac8" class="link--button link--arrow">
# Download the Biomass Calculation teaching data subset</a>
#
# </div>
# In this tutorial, we will calculate the biomass for a section of the SJER site. We
# will be using the Canopy Height Model discrete LiDAR data product as well as NEON
# field data on vegetation data. This tutorial will calculate Biomass for individual
# trees in the forest.
#
# The calculation of biomass consists of four primary steps:
#
# 1. Delineating individual tree crowns
# 2. Calculating predictor variables for all individuals
# 3. Collecting training data
# 4. Applying a regression model to estimate biomass from predictors
#
# In this tutorial we will use a watershed segmentation algorithm for delineating
# tree crowns (step 1) and a Random Forest (RF) machine learning algorithm for
# relating the predictor variables to biomass (step 4). The predictor variables were
# selected following suggestions by Gleason et al. (2012) and biomass estimates were
# determined from DBH (diameter at breast height) measurements following relationships
# given in Jenkins et al. (2003).
#
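# For reference (added; illustrative only, not part of the original tutorial):
# Jenkins et al. (2003) model aboveground biomass as bm = exp(b0 + b1 * ln(dbh)),
# with bm in kg, dbh in cm, and b0/b1 taken from species-group tables, e.g.:
#
#     import numpy as np
#     def jenkins_biomass_kg(dbh_cm, b0, b1):
#         # b0/b1 are species-group coefficients supplied by the user
#         return np.exp(b0 + b1 * np.log(dbh_cm))
#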
# ## Get Started
#
# First, we need to specify the directory where we will find and save the data needed for this tutorial. You will need to change this line to suit your local machine. I have decided to save my data in the following directory:
# In[1]:
data_path = '/Users/olearyd/Git/data/'
# Next, we will import several of the typical libraries.
# In[2]:
import numpy as np
import os
import gdal, osr
import matplotlib.pyplot as plt
import sys
from scipy import ndimage as ndi
get_ipython().run_line_magic('matplotlib', 'inline')
# Next, we will add libraries from skilearn which will help with the watershed delination, determination of predictor variables and random forest algorithm
# In[3]:
#Import biomass specific libraries
from skimage.morphology import watershed
from skimage.feature import peak_local_max
from skimage.measure import regionprops
from sklearn.ensemble import RandomForestRegressor
# ## Define functions
#
# Now we will define a few functions that allow us to more easily work with the NEON data.
#
# * `plot_band_array`: function to plot NEON spatial data.
# In[4]:
#Define plot band array function
def plot_band_array(band_array,image_extent,title,cmap_title,colormap,colormap_limits):
plt.imshow(band_array,extent=image_extent)
cbar = plt.colorbar(); plt.set_cmap(colormap); plt.clim(colormap_limits)
cbar.set_label(cmap_title,rotation=270,labelpad=20)
plt.title(title); ax = plt.gca()
ax.ticklabel_format(useOffset=False, style='plain')
rotatexlabels = plt.setp(ax.get_xticklabels(),rotation=90)
# * `array2raster`: function to output geotiff files.
# In[5]:
def array2raster(newRasterfn,rasterOrigin,pixelWidth,pixelHeight,array,epsg):
cols = array.shape[1]
rows = array.shape[0]
originX = rasterOrigin[0]
originY = rasterOrigin[1]
driver = gdal.GetDriverByName('GTiff')
outRaster = driver.Create(newRasterfn, cols, rows, 1, gdal.GDT_Float32)
outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
outband = outRaster.GetRasterBand(1)
outband.WriteArray(array)
outRasterSRS = osr.SpatialReference()
outRasterSRS.ImportFromEPSG(epsg)
outRaster.SetProjection(outRasterSRS.ExportToWkt())
outband.FlushCache()
# * `raster2array`: function to convert rasters to an array.
# In[6]:
def raster2array(geotif_file):
metadata = {}
dataset = gdal.Open(geotif_file)
metadata['array_rows'] = dataset.RasterYSize
metadata['array_cols'] = dataset.RasterXSize
metadata['bands'] = dataset.RasterCount
metadata['driver'] = dataset.GetDriver().LongName
metadata['projection'] = dataset.GetProjection()
metadata['geotransform'] = dataset.GetGeoTransform()
mapinfo = dataset.GetGeoTransform()
metadata['pixelWidth'] = mapinfo[1]
metadata['pixelHeight'] = mapinfo[5]
metadata['ext_dict'] = {}
metadata['ext_dict']['xMin'] = mapinfo[0]
metadata['ext_dict']['xMax'] = mapinfo[0] + dataset.RasterXSize/mapinfo[1]
metadata['ext_dict']['yMin'] = mapinfo[3] + dataset.RasterYSize/mapinfo[5]
metadata['ext_dict']['yMax'] = mapinfo[3]
metadata['extent'] = (metadata['ext_dict']['xMin'],metadata['ext_dict']['xMax'],
metadata['ext_dict']['yMin'],metadata['ext_dict']['yMax'])
if metadata['bands'] == 1:
raster = dataset.GetRasterBand(1)
metadata['noDataValue'] = raster.GetNoDataValue()
metadata['scaleFactor'] = raster.GetScale()
# band statistics
metadata['bandstats'] = {} # make a nested dictionary to store band stats in same
stats = raster.GetStatistics(True,True)
metadata['bandstats']['min'] = round(stats[0],2)
metadata['bandstats']['max'] = round(stats[1],2)
metadata['bandstats']['mean'] = round(stats[2],2)
metadata['bandstats']['stdev'] = round(stats[3],2)
array = dataset.GetRasterBand(1).ReadAsArray(0,0,
metadata['array_cols'],
metadata['array_rows']).astype(np.float)
array[array==int(metadata['noDataValue'])]=np.nan
array = array/metadata['scaleFactor']
return array, metadata
elif metadata['bands'] > 1:
print('More than one band ... need to modify function for case of multiple bands')
# * `crown_geometric_volume_pth`: function to get tree crown volume.
# In[7]:
def crown_geometric_volume_pth(tree_data,min_tree_height,pth):
p = np.percentile(tree_data, pth)
tree_data_pth = [v if v < p else p for v in tree_data]
crown_geometric_volume_pth = np.sum(tree_data_pth - min_tree_height)
return crown_geometric_volume_pth, p
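# Illustrative example (added): with tree_data = [2, 4, 6, 8] and pth = 50,
# np.percentile gives p = 5.0, heights are capped to [2, 4, 5.0, 5.0], and the
# returned volume is the sum of the capped heights above min_tree_height.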
# * `get_predictors`: function to compute the predictor variables for each tree segment.
# In[8]:
def get_predictors(tree,chm_array, labels):
indexes_of_tree = np.asarray(np.where(labels==tree.label)).T
tree_crown_heights = chm_array[indexes_of_tree[:,0],indexes_of_tree[:,1]]
full_crown = np.sum(tree_crown_heights - np.min(tree_crown_heights))
crown50, p50 = crown_geometric_volume_pth(tree_crown_heights,tree.min_intensity,50)
crown60, p60 = crown_geometric_volume_pth(tree_crown_heights,tree.min_intensity,60)
crown70, p70 = crown_geometric_volume_pth(tree_crown_heights,tree.min_intensity,70)
return [tree.label,
np.float(tree.area),
tree.major_axis_length,
tree.max_intensity,
tree.min_intensity,
p50, p60, p70,
full_crown, crown50, crown60, crown70]
# ## Canopy Height Data
#
# With everything set up, we can now start working with our data by defining the file path to our CHM file. Note that you will need to change this and subsequent filepaths according to your local machine.
# In[9]:
chm_file = data_path+'NEON_D17_SJER_DP3_256000_4106000_CHM.tif'
# When we output the results, we will want to include the same file information as the input, so we will gather the file name information.
# In[10]:
#Get info from chm file for outputting results
just_chm_file = os.path.basename(chm_file)
just_chm_file_split = just_chm_file.split(sep="_")
# Now we will get the CHM data...
# In[11]:
chm_array, chm_array_metadata = raster2array(chm_file)
# ..., plot it, and save the figure.
# In[12]:
#Plot the original CHM
plt.figure(1)
#Plot the CHM figure
plot_band_array(chm_array,chm_array_metadata['extent'],
                'Canopy Height Model',
'Canopy height (m)',
'Greens',[0, 9])
plt.savefig(data_path+just_chm_file[0:-4]+'_CHM.png',dpi=300,orientation='landscape',
bbox_inches='tight',
pad_inches=0.1)
# It looks like SJER primarily has low vegetation with scattered taller trees.
#
# ## Create Filtered CHM
#
# Now we will use a Gaussian smoothing kernel (convolution) across the data set to remove spurious high vegetation points. This will help ensure we are finding the treetops properly before running the watershed segmentation algorithm.
#
# For different forest types it may be necessary to change the input parameters. Information on the function can be found in the <a href="https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.filters.gaussian_filter.html" target="_blank">SciPy documentation</a>.
#
# Of most importance are the second and fifth inputs. The second input defines the standard deviation of the Gaussian smoothing kernel. Too large a value will apply too much smoothing; too small, and some spurious high points may be left behind. The fifth, the truncate value, controls after how many standard deviations the Gaussian kernel will get cut off (since it theoretically goes to infinity).
# In[13]:
#Smooth the CHM using a gaussian filter to remove spurious points
chm_array_smooth = ndi.gaussian_filter(chm_array,2,
mode='constant',cval=0,truncate=2.0)
chm_array_smooth[chm_array==0] = 0
# Now save a copy of the filtered CHM. We will later use this in our code, so we'll output it into our data directory.
# In[14]:
#Save the smoothed CHM
array2raster(data_path+'chm_filter.tif',
(chm_array_metadata['ext_dict']['xMin'],chm_array_metadata['ext_dict']['yMax']),
1,-1,
np.array(chm_array_smooth,dtype=float),
32611)
# ## Determine local maximums
#
# Now we will run an algorithm to determine local maximums within the image. Setting indices to 'False' returns a raster of the maximum points, as opposed to a list of coordinates. The footprint parameter is an area where only a single peak can be found. This should be approximately the size of the smallest tree. Information on more sophisticated methods to define the window can be found in Chen (2006).
# In[15]:
#Calculate local maximum points in the smoothed CHM
local_maxi = peak_local_max(chm_array_smooth,indices=False, footprint=np.ones((5, 5)))
# Our new object `local_maxi` is an array of boolean values where each pixel is identified as either being the local maximum (`True`) or not being the local maximum (`False`).
# In[16]:
local_maxi
# This is very helpful, but it can be difficult to visualize boolean values using our typical numeric plotting procedures as defined in the `plot_band_array` function above. Therefore, we will need to convert this boolean array to a numeric format to use this function. Booleans convert easily to integers with values of `False=0` and `True=1` using the `.astype(int)` method.
# In[17]:
local_maxi.astype(int)
# Next, we can plot the raster of local maximums by coercing the boolean array into an array of integers inline. The following figure shows the difference in finding local maximums for a filtered vs. non-filtered CHM.
#
# We will save the graphics (.png) in an outputs folder sister to our working directory and data outputs (.tif) to our data directory.
# In[18]:
#Plot the local maximums
plt.figure(2)
plot_band_array(local_maxi.astype(int),chm_array_metadata['extent'],
'Maximum',
'Maxi',
'Greys',
[0, 1])
plt.savefig(data_path+just_chm_file[0:-4]+ '_Maximums.png',
dpi=300,orientation='landscape',
bbox_inches='tight',pad_inches=0.1)
array2raster(data_path+'maximum.tif',
(chm_array_metadata['ext_dict']['xMin'],chm_array_metadata['ext_dict']['yMax']),
1,-1,np.array(local_maxi,dtype=np.float32),32611)
# If we were to look at the overlap between the tree crowns and the local maxima from each method, it would appear a bit like this raster.
#
# <figure>
# <a href="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-filter-vs-nonfilter.jpg">
# <img src="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-filter-vs-nonfilter.jpg"></a>
# <figcaption> The difference in finding local maximums for a filtered vs.
# non-filtered CHM.
# Source: National Ecological Observatory Network (NEON)
# </figcaption>
# </figure>
#
#
# Apply labels to all of the local maximum points
# In[19]:
#Identify all the maximum points
markers = ndi.label(local_maxi)[0]
# Next we will create a mask layer of all of the vegetation points so that the watershed segmentation will only occur on the trees and not extend into the surrounding ground points. Since 0 represents ground points in the CHM, setting the mask to 1 where the CHM is not zero will define the mask.
# In[20]:
#Create a CHM mask so the segmentation will only occur on the trees
chm_mask = chm_array_smooth
chm_mask[chm_array_smooth != 0] = 1
# ## Watershed segmentation
#
# As in a river system, a watershed is divided by a ridge that divides areas. Here our watersheds are the individual tree canopies, and the ridges are the delineations between them.
#
# <figure>
# <a href="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-watershed-segments.png">
# <img src="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-watershed-segments.png"></a>
# <figcaption> A raster classified based on watershed segmentation.
# Source: National Ecological Observatory Network (NEON)
# </figcaption>
# </figure>
#
# Next, we will perform the watershed segmentation which produces a raster of labels.
# In[21]:
#Perform watershed segmentation
labels = watershed(chm_array_smooth, markers, mask=chm_mask)
labels_for_plot = labels.copy()
labels_for_plot = np.array(labels_for_plot,dtype = np.float32)
labels_for_plot[labels_for_plot==0] = np.nan
max_labels = np.max(labels)
# In[22]:
#Plot the segments
plot_band_array(labels_for_plot,chm_array_metadata['extent'],
'Crown Segmentation','Tree Crown Number',
'Spectral',[0, max_labels])
plt.savefig(data_path+just_chm_file[0:-4]+'_Segmentation.png',
dpi=300,orientation='landscape',
bbox_inches='tight',pad_inches=0.1)
array2raster(data_path+'labels.tif',
(chm_array_metadata['ext_dict']['xMin'],
chm_array_metadata['ext_dict']['yMax']),
1,-1,np.array(labels,dtype=float),32611)
# Now we will get several properties of the individual trees that will be used as predictor variables.
# In[23]:
#Get the properties of each segment
tree_properties = regionprops(labels,chm_array)
# Now we will get the predictor variables to match the (soon to be loaded) training data using the function defined above. The first column will be segment IDs, the rest will be the predictor variables.
# In[24]:
predictors_chm = np.array([get_predictors(tree, chm_array, labels) for tree in tree_properties])
X = predictors_chm[:,1:]
tree_ids = predictors_chm[:,0]
# ## Training data
#
# We now bring in the training data file which is a simple CSV file with no header. The first column is biomass, and the remaining columns are the same predictor variables defined above. The tree diameter and max height are defined in the NEON vegetation structure data along with the tree DBH. The field-validated values are used for training, while the others were determined from the CHM and camera images by manually delineating the tree crowns and pulling out the relevant information from the CHM.
#
# Biomass was calculated from DBH according to the formulas in Jenkins et al. (2003).
#
# If you didn't download this training dataset above, you can <a href="https://neondata.sharefile.com/share/view/cdc8242e24ad4517/fobd4959-4cf0-44ab-acc6-0695a04a1afc" target="_blank">Download the training dataset CSV here</a>.
# In[25]:
#Define the file of training data
training_data_file = data_path+'SJER_Biomass_Training.csv'
#Read in the training data from a CSV file
training_data = np.genfromtxt(training_data_file,delimiter=',')
#Grab the biomass (y) from the first column
biomass = training_data[:,0]
#Grab the biomass predictors from the remaining columns
biomass_predictors = training_data[:,1:12]
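# For reference, the biomass values in this training data were derived from DBH
# using the Jenkins et al. (2003) allometry, which takes the form
# bm = exp(beta0 + beta1 * ln(dbh)), with DBH in cm and biomass in kg. The sketch
# below is illustrative only: the coefficients are placeholders rather than the
# species-group values from the paper, so substitute the appropriate beta0/beta1
# for your species group before using it.
# In[ ]:
def jenkins_biomass(dbh_cm, beta0=-2.48, beta1=2.48):
    """Aboveground biomass (kg) from DBH (cm); coefficients are placeholders."""
    return np.exp(beta0 + beta1 * np.log(dbh_cm))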
# ## Random Forest regression
#
# We can then define the parameters of the Random Forest regressor and fit it to the training predictor variables and biomass values.
# In[26]:
#Define parameters for the Random Forest regressor
max_depth = 30
#Define regressor rules
regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)
#Fit the regressor to the predictor variables and biomass
regr_rf.fit(biomass_predictors, biomass)
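# As an optional sanity check (not part of the original workflow), we can report
# the R^2 of the model on its own training data; expect an optimistic value,
# since Random Forests fit training data very closely.
# In[ ]:
print('Training R^2: %0.3f' % regr_rf.score(biomass_predictors, biomass))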
# We now apply the Random Forest model to the predictor variables to retrieve the estimated biomass.
# In[27]:
#Apply the model to the predictors
estimated_biomass = regr_rf.predict(X)
# For outputting a raster, copy the labels raster to a biomass raster, then cycle through the segments and assign the biomass estimate to each individual tree segment.
# In[28]:
#Set an out raster with the same size as the labels
biomass_map = np.array(labels, dtype=float)
#Mask out the ground (label 0) before assigning biomass
biomass_map[biomass_map == 0] = np.nan
#Assign the appropriate biomass to each tree label
for tree_id, biomass_of_tree_id in zip(tree_ids, estimated_biomass):
    biomass_map[biomass_map == tree_id] = biomass_of_tree_id
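# The loop above rescans the whole raster once per tree, which can be slow on
# large tiles, and it can misbehave if an assigned biomass value happens to
# equal a later tree ID. A vectorized equivalent (a sketch, assuming tree IDs
# are small non-negative integers) builds a lookup table from label to biomass:
# In[ ]:
lut = np.full(int(labels.max()) + 1, np.nan)
lut[tree_ids.astype(int)] = estimated_biomass
biomass_map = lut[labels]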
# ## Calculate Biomass
# Collect some biomass statistics, then plot the results and save an output GeoTIFF.
# In[29]:
#Get biomass stats for plotting
mean_biomass = np.mean(estimated_biomass)
std_biomass = np.std(estimated_biomass)
min_biomass = np.min(estimated_biomass)
sum_biomass = np.sum(estimated_biomass)
print('Sum of biomass is', sum_biomass, 'kg')
#Plot the biomass!
plt.figure(5)
plot_band_array(biomass_map,chm_array_metadata['extent'],
'Biomass (kg)','Biomass (kg)',
'winter',
[min_biomass+std_biomass, mean_biomass+std_biomass*3])
plt.savefig(data_path + '_'.join(just_chm_file_split[0:6]) + '_Biomass.png',
dpi=300,orientation='landscape',
bbox_inches='tight',
pad_inches=0.1)
array2raster(data_path+'biomass.tif',
(chm_array_metadata['ext_dict']['xMin'],chm_array_metadata['ext_dict']['yMax']),
1,-1,np.array(biomass_map,dtype=float),32611)
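# Optional verification (assumes the GDAL bindings used earlier in this
# notebook are available): re-open the exported GeoTIFF and confirm its size.
# In[ ]:
from osgeo import gdal
check_ds = gdal.Open(data_path + 'biomass.tif')
print('biomass.tif size:', check_ds.RasterXSize, 'x', check_ds.RasterYSize)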
# In[ ]:
| agpl-3.0 | -6,897,848,424,968,655,000 | 35.17284 | 503 | 0.709654 | false | 3.292135 | false | false | false |
abstractfactory/openmetadata-mk1 | transaction.py | 1 | 5900 | """Convenience module for the end-user
The goal of this module is to provide utilities that are as
high-level as possible, for users who wish to know as little
as possible about Open Folder.
Target audience leans towards Technical Directors or
fellow scripters in any DCC.
"""
from __future__ import absolute_import
import os
# import sys
import errno
import logging
import shutil
import collections
from openmetadata import domain
log = logging.getLogger('openmetadata.transaction')
def write(path, channel=None, key=None, data=None):
"""Convenience method for writing metadata"""
container = domain.Folder(path)
if key and not channel:
raise ValueError("Argument `key` must be specified in "
"conjunction with `channel`")
if channel and not key:
if not isinstance(data, dict):
raise ValueError("Data passed to object of type "
"<Channel> must be of type <dict>")
container = domain.Channel(channel, container)
if channel and key:
channel = domain.Channel(channel, container)
key = domain.Key(key, channel)
container = key
container.data = data
# container.write()
print "%r = %r" % (container.path, container.data)
def update(path, channel=None, key=None, data=None):
"""Convenience method for updating metadata"""
raise NotImplementedError
def read(path, channel=None, key=None):
"""Convenience method for reading metadata
Parameters
path (str) : Path to meta folder
channel (str) : (optional) Name of individual channel
key (str) : (optional) Name of individual file
Returns
dict() : {'obj.name': content}
Calling this method with only `path` specified is identical
to calling Folder.read().data directly.
"""
if key and not channel:
raise ValueError("Must supply `channel` with `key` argument")
if not os.path.exists(path):
return {}
try:
obj = domain.Factory.create(path)
except WindowsError as e:
# Temporary fix. An error occurs when trying to
# read junctions pointing to invalid targets.
if e.errno == errno.ENOENT:
print e
return {}
        raise  # re-raise, preserving the original traceback
assert isinstance(obj, domain.Folder)
if channel:
obj = obj.child(channel)
if not obj:
return {}
if key:
obj = obj.child(key)
if not obj:
return None
return obj.read().data
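# Hypothetical usage (path, channel and key names are illustrative only):
#   read(r's:\content\jobs\test', channel='broadcast', key='message')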
def exists(path, channel=None, key=None):
pass
def cascade(path, channel, key=None):
"""Merge metadata of each channel matching `term` up-wards through hierarchy"""
folder = domain.Folder(path)
hierarchy = _findchannels(folder, channel)
hierarchy.reverse()
# An implementation of the Property-Pattern as discussed here:
# http://steve-yegge.blogspot.co.uk/2008/10/universal-design-pattern.html
metadata_hierarchy = []
for _channel in hierarchy:
_channel.read()
_data = _channel.data or {}
metadata_hierarchy.append(_data)
# The following algorithm is based on this answer:
# http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
def update(d, u):
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
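    # Example of the merge behaviour (illustrative, not from the original):
    #   update({'a': {'x': 1}}, {'a': {'y': 2}}) -> {'a': {'x': 1, 'y': 2}}
    # Nested mappings are merged key-by-key rather than replaced wholesale.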
metadata = {}
for _metadata in metadata_hierarchy:
update(metadata, _metadata)
return metadata
def delete(path, channel=None, key=None, max_retries=10):
assert os.path.exists(path)
retries = 0
while True:
try:
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
break
except WindowsError as e:
# Sometimes, Dropbox can bother this operation;
# creating files in the midst of deleting a folder.
#
# If this happens, try again in a short while.
retries += 1
if retries > max_retries:
log.error(e)
break
import time
time.sleep(0.1)
log.info("Retired %i time(s) for %s" % (retries, path))
log.info("Removed %s" % path)
def _findchannels(folder, term, result=None):
"""Return channels matching `term` up-wards through hierarchy"""
assert isinstance(folder, domain.Folder)
result = result or []
# Note: We can only cascade channels of type .kvs
current_channel = None
# Look for `term` within folder
for _channel in folder:
if _channel.name == term and _channel.extension == '.kvs':
result.append(_channel)
current_channel = _channel
# Recurse
parent = folder.parent
if parent:
        # Before we recurse, ensure this channel is not flagged as a root.
isroot = False
# TODO
# Find a way to optimize this. Channel is being read here
# to find the isRoot property which is used solely to
# determine whether or not to continue searching.
# This is an expensive operation, and whats worse,
# the channel is being re-read in `cascade`.
if current_channel:
data = current_channel.read().data or {}
if data.get('isRoot') is True:
isroot = True
if not isroot:
return _findchannels(parent, term, result)
return result
# def cascade(folder, term):
if __name__ == '__main__':
import openmetadata as om
package = os.getcwd()
path = os.path.join(package, 'test', 'persist')
path = om.Folder(r's:\content\jobs\test\content\shots')
# print cascade(path, 'properties')
| mit | 4,959,071,983,263,927,000 | 25.818182 | 101 | 0.605593 | false | 4.184397 | false | false | false |