repo_name (string, 5-92 chars) | path (string, 4-221 chars) | copies (19 classes) | size (string, 4-6 chars) | content (string, 766-896k chars) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
gcallah/Indra | indraV1/models/markov_attempts/two_pop_markov_model.py | 1 | 1534 | """
You can clone this file and its companion two_pop_m_run.py
to easily get started on a new two-population Markov model.
It is also a handy tool to have around for testing
new features added to the base system. The agents
don't move. Each agent has a 50% chance of changing color
from red to blue, or from blue to red.
"""
import indra.two_pop_markov as itpm
R = 0
B = 1
STATE_MAP = { R: "Red", B: "Blue" }
class TestFollower(itpm.Follower):
"""
An agent that prints its neighbors in preact
    and also jumps to an empty cell: default behavior
from our ancestor.
Attributes:
        state: Red or Blue; whichever it is, the agent
            will appear as this color on the scatter plot.
ntype: node type
next_state: the next color the agent will be
"""
def __init__(self, name, goal):
super().__init__(name, goal)
self.state = R
self.ntype = STATE_MAP[R]
self.next_state = None
def postact(self):
"""
Set our type to next_state.
"""
if self.next_state is not None and self.next_state != self.state:
# print("Setting state to " + str(self.next_state))
self.set_state(self.next_state)
self.next_state = None
return self.pos
def set_state(self, new_state):
"""
Set agent's new type.
"""
old_type = self.ntype
self.state = new_state
self.ntype = STATE_MAP[new_state]
self.env.change_agent_type(self, old_type, self.ntype)
| gpl-3.0 | 7,503,514,805,912,917,000 | 26.890909 | 73 | 0.602999 | false | 3.617925 | false | false | false |
infojasyrc/client_dataws | client_dataws/lib/util/file_verificator.py | 1 | 2208 | '''
Created on Feb 02, 2013
@author: Jose Sal y Rosas
@contact: [email protected]
'''
import zlib
import hashlib
class Verificator(object):
def __init__(self):
pass
def set_parameters(self, path='', algorithm='crc32', blocksize=8192):
self.path = path
self.algorithm = algorithm
self.blocksize = blocksize
def set_algorithm(self, algorithm):
self.algorithm = algorithm
def set_file(self, path):
self.path = path
def set_block_size(self, blocksize):
self.blocksize = blocksize
def get_algorithm(self):
return self.algorithm
def get_file(self):
return self.path
def get_block_size(self):
return self.blocksize
def generatechecksum(self, path='', blocksize=8192):
resultado = 0
if path == '':
path = self.path
if blocksize == 8192:
blocksize = self.blocksize
if 'crc32' in self.algorithm:
resultado = self.executecrc(path, blocksize)
elif 'md5' in self.algorithm:
resultado = self.executemd5(path, blocksize)
return resultado
def executecrc(self, path, blocksize):
crctemp = 0
with open(path, 'rb') as f:
while True:
data = f.read(blocksize)
if not data:
break
crctemp = zlib.crc32(data, crctemp)
return crctemp
def executemd5(self, path, blocksize):
with open(path, 'rb') as f:
m = hashlib.md5()
while True:
data = f.read(blocksize)
if not data:
break
m.update(data)
return m.hexdigest()
def verify(self, path, checksum):
if checksum == self.generatechecksum(path):
            return 'succeed'
else:
return 'failed'
if __name__ == "__main__":
path = '/home/developer/Documents/database/datos/iniciales/EW_Drift+Faraday/EW_Drift/d2012219/D2012213003.r'
obj = Verificator()
obj.set_parameters(path, 'md5')
checksum = obj.generatechecksum()
print checksum
print obj.verify(path, checksum) | mit | 8,283,340,951,751,417,000 | 23.544444 | 112 | 0.567029 | false | 3.992767 | false | false | false |
itdxer/django-project-template | {{cookiecutter.project_name}}/apps/users/migrations/0001_initial.py | 1 | 2579 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
},
bases=(models.Model,),
),
]
| mit | 6,700,627,795,742,435,000 | 68.702703 | 289 | 0.651028 | false | 4.312709 | false | false | false |
andrewchenshx/vnpy | vnpy/app/algo_trading/algos/dma_algo.py | 1 | 2651 | from vnpy.trader.constant import Offset, Direction, OrderType
from vnpy.trader.object import TradeData, OrderData, TickData
from vnpy.trader.engine import BaseEngine
from vnpy.app.algo_trading import AlgoTemplate
class DmaAlgo(AlgoTemplate):
""""""
display_name = "DMA 直接委托"
default_setting = {
"vt_symbol": "",
"direction": [Direction.LONG.value, Direction.SHORT.value],
"order_type": [
OrderType.MARKET.value,
OrderType.LIMIT.value,
OrderType.STOP.value,
OrderType.FAK.value,
OrderType.FOK.value
],
"price": 0.0,
"volume": 0.0,
"offset": [
Offset.NONE.value,
Offset.OPEN.value,
Offset.CLOSE.value,
Offset.CLOSETODAY.value,
Offset.CLOSEYESTERDAY.value
]
}
variables = [
"traded",
"vt_orderid",
"order_status",
]
def __init__(
self,
algo_engine: BaseEngine,
algo_name: str,
setting: dict
):
""""""
super().__init__(algo_engine, algo_name, setting)
# Parameters
self.vt_symbol = setting["vt_symbol"]
self.direction = Direction(setting["direction"])
self.order_type = OrderType(setting["order_type"])
self.price = setting["price"]
self.volume = setting["volume"]
self.offset = Offset(setting["offset"])
# Variables
self.vt_orderid = ""
self.traded = 0
self.order_status = ""
self.subscribe(self.vt_symbol)
self.put_parameters_event()
self.put_variables_event()
def on_tick(self, tick: TickData):
""""""
if not self.vt_orderid:
if self.direction == Direction.LONG:
self.vt_orderid = self.buy(
self.vt_symbol,
self.price,
self.volume,
self.order_type,
self.offset
)
else:
self.vt_orderid = self.sell(
self.vt_symbol,
self.price,
self.volume,
self.order_type,
self.offset
)
self.put_variables_event()
def on_order(self, order: OrderData):
""""""
self.traded = order.traded
self.order_status = order.status
if not order.is_active():
self.stop()
self.put_variables_event()
def on_trade(self, trade: TradeData):
""""""
pass
| mit | 3,550,596,912,636,014,000 | 25.69697 | 67 | 0.501703 | false | 4.035115 | false | false | false |
smartczm/python-learn | Old-day01-10/s13-day5/get/day5/Atm/src/crontab.py | 1 | 2023 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import json
import time
from config import settings
from src.backend import logger
def main():
card_list = os.listdir(settings.USER_DIR_FOLDER)
for card in card_list:
basic_info = json.load(open(os.path.join(settings.USER_DIR_FOLDER, card, 'basic_info.json')))
struct_time = time.localtime()
        # Loop over the bill list and accrue interest on each month's outstanding debt, writing it into that month's bill.
for item in basic_info['debt']:
interest = item['total_debt'] * 0.0005
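            # Worked example (hypothetical figures): a total_debt of 10,000
            # accrues 10,000 * 0.0005 = 5 in interest per run of this job.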
if basic_info['saving'] >= interest:
basic_info['saving'] -= interest
else:
temp = interest - basic_info['saving']
basic_info['balance'] -= temp
logger_obj = logger.get_logger(card, struct_time)
logger_obj.info("欠款利息 - %f - 备注:未还款日期%s;共欠款%f,未还款%f" % (interest, item['date'], item['total_debt'], item['balance_debt'],))
json.dump(
basic_info,
open(os.path.join(settings.USER_DIR_FOLDER, basic_info['card'], "basic_info.json"), 'w')
)
        # If the current day is the 10th (i.e. charges made before the 9th):
        # when the balance is negative, add the amount owed to the debt list so it
        # starts accruing interest, and restore this month's available credit.
if struct_time.tm_mday == 11 and basic_info['credit'] > basic_info['balance']:
date = time.strftime("%Y-%m-%d")
dic = {'date': date,
"total_debt": basic_info['credit'] - basic_info['balance'],
"balance_debt": basic_info['credit'] - basic_info['balance'],
}
basic_info['debt'].append(dic)
            # Restore the available credit limit
basic_info['balance'] = basic_info['credit']
json.dump(
basic_info,
open(os.path.join(settings.USER_DIR_FOLDER, basic_info['card'], "basic_info.json"), 'w')
)
def run():
main()
| gpl-2.0 | -718,927,813,982,822,900 | 34.134615 | 135 | 0.536946 | false | 2.980424 | false | false | false |
IsmoilovMuhriddin/allgo | rasp/diagnosis.py | 1 | 3928 | import signal
import time
import wiringpi as wp
from rasp.allgo_utils import PCA9685
from rasp.allgo_utils import ultrasonic as uls
LOW = 0
HIGH = 1
OUTPUT = wp.OUTPUT
INPUT = wp.INPUT
CAR_DIR_FW = 0
CAR_DIR_BK = 1
CAR_DIR_LF = 2
CAR_DIR_RF = 3
CAR_DIR_ST = 4
DIR_DISTANCE_ALERT = 20
preMillis = 0
keepRunning = 1
OUT = [5, 0, 1, 2, 3] # 5:front_left_led, 0:front_right_led, 1:rear_right_led, 2:rear_left_led, 3:ultra_trig
IN = [21, 22, 26, 23] # 21:left_IR, 22:center_IR, 26:right_IR, 23:ultra_echo
ULTRASONIC_TRIG = 3 # TRIG port is to use as output signal
ULTRASONIC_ECHO = 23 # ECHO port is to use as input signal
# An instance of the motor & buzzer
pca9685 = PCA9685()
#Ultrasonic ultra; # An instance of the ultrasonic sensor
ultra = uls(ULTRASONIC_TRIG,ULTRASONIC_ECHO)
# distance range: 2cm ~ 5m
# angular range: 15deg
# resolution: 3mm
"""
void setup();
void loop();
void checkUltra();
void intHandler(int dummy);
"""
def setup():
wp.wiringPiSetup() # Initialize wiringPi to load Raspbarry Pi PIN numbering scheme
"""
for(i=0; i<sizeof(OUT); i++){
pinMode(OUT[i], OUTPUT); // Set the pin as output mode
wp.digitalWrite(OUT[i], LOW); // Transmit HIGH or LOW value to the pin(5V ~ 0V)
}"""
for i in range(len(OUT)):
wp.pinMode(OUT[i],OUTPUT)
wp.digitalWrite(OUT[i], LOW)
for i in range(len(IN)):
wp.pinMode(IN[i],INPUT)
def check_ultra():
disValue=ultra.distance()
print("Distance:%.2f\t"%disValue)
def action(menu):
    # keepRunning must be declared global so the menu==11 branch can stop the main loop
    global keepRunning
if menu==0:
pca9685.go_forward();
time.sleep(20);
pca9685.stop();
elif menu== 1:
pca9685.go_back();
time.sleep(20);
pca9685.stop();
elif menu== 2:
        # front left
wp.digitalWrite(OUT[0], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[0], LOW);
time.sleep(20);
wp.digitalWrite(OUT[0], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[0], LOW);
elif menu== 3:
        # front right
wp.digitalWrite(OUT[1], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[1], LOW);
time.sleep(20);
wp.digitalWrite(OUT[1], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[1], LOW);
elif menu== 4:
#// rear left
wp.digitalWrite(OUT[3], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[3], LOW);
time.sleep(20);
wp.digitalWrite(OUT[3], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[3], LOW);
elif menu== 5:
# rear right
wp.digitalWrite(OUT[2], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[2], LOW);
time.sleep(20);
wp.digitalWrite(OUT[2], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[2], LOW);
elif menu ==6:
#ultrasonic
check_ultra();
elif menu== 9:
pca9685.go_right();
time.sleep(5);
pca9685.stop();
elif menu== 10:
pca9685.go_left();
time.sleep(5);
pca9685.stop();
elif menu== 8:
print("Beeping for 2 seconds\n");
pca9685.on_buzz();
time.sleep(2);
pca9685.off_buzz();
elif menu== 11:
print("EXIT\n");
keepRunning = 0;
else:
print("Check the list again\n")
print("\n")
menu = -1
def loop():
"""// return the cu
time(el
time since your arduino started) in milliseconds(1/1000 second)"""
llinevalue = 0
clinevalue = 0
rlinevalue = 0
    print('This is a diagnostic program for your mobile robot.\n')
    print('0: go forward\n1: go backward\n2: front left led\n3: front right led\n4: rear left led\n5: rear right led\n6: ultrasonic\n7: IR\n8: buzzer\n9: go right\n10: go left\n11: Exit the program\n')
print('Please select one of them: ')
menu = int(input())
action(menu)
menu = -1
"""// obstacle detection and move to another derection.
void checkUltra(){
float disValue = ultra.ReadDista
timeter();
printf("ultrasonic: %f\n",disValue);
"""
def signal_handler(signum, frame):
    # signal handlers are called with (signum, frame); flag the main loop to stop
    print("SIGNAL INTERRUPT", signum)
    global keepRunning
    keepRunning = 0
def main (**kwargs):
setup()
signal.signal(signal.SIGINT, signal_handler)
while keepRunning:
loop()
return 0
main()
| mit | -4,003,689,042,530,230,300 | 20.944134 | 196 | 0.647912 | false | 2.550649 | false | false | false |
ppppn/twitter-bot | ReplyAndFav.py | 1 | 6678 | #!/usr/bin/python
# coding: UTF-8
from tweepy.error import TweepError
import random
import re
from const import *
from words import *
from replies import replies
import datetime
import logging
from API import GetAPI
logging.basicConfig(level=LOGLEVEL)
api = None
# Description
# Function list
# FUNCTION_NAME(args) > Returns(SUCCESS, FAILED)
# FetchHomeTL() > (TIMELINE, False)
# FormattingAndTweetForReply(status, content) > (True, False)
# CheckAndReplyToSpecialWord(account_screen_name, status) > (True, False)
# CheckAndReplyToNormalTweet(status) > (True, False)
# CheckAndCreateFav(status) > (True, False)
def UpdateAndNotifyAccountInfo():
__name__ = "UpdateAndNotifyAccountInfo()"
global api
account = api.me()
try:
if not account.name == BOT_NAME:
api.update_status(UPDATE_MSG)
api.update_profile(name=BOT_NAME)
logging.info("%s: Successfully finished.", __name__)
except TweepError, e:
logging.error("%s: %s", __name__, e.reason)
def FetchHomeTL():
__name__ = "FetchHomeTL()"
global api
since_id = api.user_timeline()[0].id
logging.debug("%s: Last post id: %d", __name__, since_id)
try:
return api.home_timeline(since_id=since_id)
except TweepError, e:
logging.error("%s: %s", __name__, e.reason)
return False
def FormattingAndTweetForReply(status, content):
__name__ = "FormattingAndTweetForReply()"
global api
    # Format the tweet into its final posted form and post it
error_counter = 0
    # Replace {name} with the recipient's display name
content = content.format(name = status.author.name)
    # Prepend the @screen_name mention
formatted_tweet = "@" + status.author.screen_name + " " + content
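    # Worked example (hypothetical user): content "Hello {name}!" for an author
    # named "Taro" with screen name "taro123" becomes "@taro123 Hello Taro!"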
    # Post it
while error_counter < ERROR_LIMIT:
try:
api.update_status(formatted_tweet, in_reply_to_status_id = int(status.id))
logging.debug("%s: The following tweet was successfully posted> '%s'",
__name__, formatted_tweet)
return True
except TweepError, e:
logging.error(e.reason)
error_counter += 1
logging.error("%s: Failed to post %d times. Aborted.", __name__, ERROR_LIMIT)
return False
def CheckAndReplyToSpecialWord(account_screen_name, status):
__name__ = "CheckAndReplyToSpecialWord()"
global api
error_counter = 0
    # Only handle mentions addressed to the bot
if status.in_reply_to_screen_name == account_screen_name:
for special_word in special_words:
if re.search(special_word, status.text):
logging.debug("%s: The special word '%s' was detected in %s's post '%s'",
__name__, special_word, status.author.screen_name, status.text)
num_max_patterns = len(special_words[special_word]) - 1
while error_counter < ERROR_LIMIT:
random.seed()
selected_num = random.randint(0, num_max_patterns)
content = special_words[special_word][selected_num]
                    # Append the current time to avoid duplicate-post errors
content += " (%s)"%str(datetime.datetime.today())
logging.debug("%s: Special word reply was generated> '%s'", __name__, content)
if FormattingAndTweetForReply(status, content):
return True
else:
logging.error("%s: Reselect", __name__)
error_counter += 1
logging.error("%s: Failed to post %d times. Aborted.", __name__, ERROR_LIMIT)
return False
logging.debug("%s: No special word was founded in %s's post '%s'",
__name__, status.author.screen_name, status.text)
return False
else:
return False
def CheckAndReplyToNormalTweet(status):
__name__ = "CheckAndReplyToNormalTweet()"
global api
error_counter = 0
num_max_tw = len(replies) - 1
for word in reply_words:
if re.search(word, status.text):
logging.debug("%s: The reply word '%s' was detected in %s's post '%s'",
__name__, word, status.author.screen_name, status.text)
while error_counter < ERROR_LIMIT:
random.seed()
tw_num = random.randint(0, num_max_tw)
content = replies[tw_num].format(name=status.author.name)
logging.debug("%s: Normal word reply selected> '%s'", __name__, content)
if FormattingAndTweetForReply(status, content):
return True
else:
logging.error("%s: Reselect", __name__)
error_counter += 1
logging.error("%s: Failed to post %d times. Aborted.", __name__, ERROR_LIMIT)
return False
def CheckAndCreateFav(status):
__name__ = "CheckAndCreateFav()"
global api
if status.favorited == False:
error_counter = 0
for fav_word in fav_words:
if re.search(fav_word, status.text):
logging.debug("%s: Favorite word '%s' was detected in %s's post '%s'",
__name__, fav_word, status.author.screen_name, status.text)
while error_counter < ERROR_LIMIT:
try:
api.create_favorite(status.id)
logging.debug("%s: Successfully favorited %s's post> '%s'",
__name__, status.author.screen_name, status.text)
return True
except TweepError, e:
logging.error(e.reason)
error_counter += 1
logging.error("%s: Failed to create fav %d times. Aborted.",
__name__, ERROR_LIMIT)
return False
def main():
global api
api = GetAPI()
UpdateAndNotifyAccountInfo()
account_screen_name = api.me().screen_name
tw_counter = 0
fav_counter = 0
result = False
Timeline = FetchHomeTL()
contains_excluded_word = False
if Timeline == False:
logging.critical("Failed to fetch home timeline. All processes are aborted.")
else:
for status in Timeline:
contains_excluded_word = False
if status.author.screen_name == account_screen_name:
pass
            # Tweets posted by the bot itself are skipped
else:
                # Skip processing if the tweet contains any word registered in excluded_words
for excluded_word in excluded_words:
if re.search(excluded_word, status.text):
contains_excluded_word = True
if contains_excluded_word == False:
result = CheckAndReplyToSpecialWord(account_screen_name, status)
if result == False:
result = CheckAndReplyToNormalTweet(status)
if result == True:
tw_counter += 1
result = CheckAndCreateFav(status)
if result == True:
fav_counter += 1
logging.info("Reply: %d, Fav: %d", tw_counter, fav_counter)
if __name__ == "__main__":
main()
| mit | -1,916,938,151,522,885,000 | 32.989418 | 89 | 0.622821 | false | 3.33541 | false | false | false |
futureshocked/RaspberryPi-FullStack | Complete_Python2_app/lab_app.py | 1 | 6719 | from flask import Flask, request, render_template
import time
import datetime
import arrow
app = Flask(__name__)
app.debug = True # Make this False if you are no longer debugging
@app.route("/")
def hello():
return "Hello World!"
@app.route("/lab_temp")
def lab_temp():
import sys
import Adafruit_DHT
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.AM2302, 4)
if humidity is not None and temperature is not None:
return render_template("lab_temp.html",temp=temperature,hum=humidity)
else:
return render_template("no_sensor.html")
@app.route("/lab_env_db", methods=['GET']) #Add date limits in the URL #Arguments: from=2015-03-04&to=2015-03-05
def lab_env_db():
temperatures, humidities, timezone, from_date_str, to_date_str = get_records()
# Create new record tables so that datetimes are adjusted back to the user browser's time zone.
time_adjusted_temperatures = []
time_adjusted_humidities = []
for record in temperatures:
local_timedate = arrow.get(record[0], "YYYY-MM-DD HH:mm").to(timezone)
time_adjusted_temperatures.append([local_timedate.format('YYYY-MM-DD HH:mm'), round(record[2],2)])
for record in humidities:
local_timedate = arrow.get(record[0], "YYYY-MM-DD HH:mm").to(timezone)
time_adjusted_humidities.append([local_timedate.format('YYYY-MM-DD HH:mm'), round(record[2],2)])
print "rendering lab_env_db.html with: %s, %s, %s" % (timezone, from_date_str, to_date_str)
return render_template("lab_env_db.html", timezone = timezone,
temp = time_adjusted_temperatures,
hum = time_adjusted_humidities,
from_date = from_date_str,
to_date = to_date_str,
temp_items = len(temperatures),
hum_items = len(humidities),
query_string = request.query_string, #This query string is used by the Plotly link
)
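# Hypothetical requests illustrating the query arguments parsed by get_records()
# below (host/port are whatever app.run() binds to):
#   /lab_env_db?from=2015-03-04%2000:00&to=2015-03-05%2012:00&timezone=Europe/Athens
#   /lab_env_db?range_h=24&timezone=Europe/Athens   (rolling 24-hour window)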
def get_records():
import sqlite3
from_date_str = request.args.get('from',time.strftime("%Y-%m-%d 00:00")) #Get the from date value from the URL
to_date_str = request.args.get('to',time.strftime("%Y-%m-%d %H:%M")) #Get the to date value from the URL
timezone = request.args.get('timezone','Etc/UTC');
range_h_form = request.args.get('range_h',''); #This will return a string, if field range_h exists in the request
range_h_int = "nan" #initialise this variable with not a number
print "REQUEST:"
print request.args
try:
range_h_int = int(range_h_form)
except:
print "range_h_form not a number"
print "Received from browser: %s, %s, %s, %s" % (from_date_str, to_date_str, timezone, range_h_int)
if not validate_date(from_date_str): # Validate date before sending it to the DB
from_date_str = time.strftime("%Y-%m-%d 00:00")
if not validate_date(to_date_str):
to_date_str = time.strftime("%Y-%m-%d %H:%M") # Validate date before sending it to the DB
print '2. From: %s, to: %s, timezone: %s' % (from_date_str,to_date_str,timezone)
# Create datetime object so that we can convert to UTC from the browser's local time
from_date_obj = datetime.datetime.strptime(from_date_str,'%Y-%m-%d %H:%M')
to_date_obj = datetime.datetime.strptime(to_date_str,'%Y-%m-%d %H:%M')
# If range_h is defined, we don't need the from and to times
if isinstance(range_h_int,int):
arrow_time_from = arrow.utcnow().replace(hours=-range_h_int)
arrow_time_to = arrow.utcnow()
from_date_utc = arrow_time_from.strftime("%Y-%m-%d %H:%M")
to_date_utc = arrow_time_to.strftime("%Y-%m-%d %H:%M")
from_date_str = arrow_time_from.to(timezone).strftime("%Y-%m-%d %H:%M")
to_date_str = arrow_time_to.to(timezone).strftime("%Y-%m-%d %H:%M")
else:
#Convert datetimes to UTC so we can retrieve the appropriate records from the database
from_date_utc = arrow.get(from_date_obj, timezone).to('Etc/UTC').strftime("%Y-%m-%d %H:%M")
to_date_utc = arrow.get(to_date_obj, timezone).to('Etc/UTC').strftime("%Y-%m-%d %H:%M")
conn = sqlite3.connect('/var/www/lab_app/lab_app.db')
curs = conn.cursor()
curs.execute("SELECT * FROM temperatures WHERE rDateTime BETWEEN ? AND ?", (from_date_utc.format('YYYY-MM-DD HH:mm'), to_date_utc.format('YYYY-MM-DD HH:mm')))
temperatures = curs.fetchall()
curs.execute("SELECT * FROM humidities WHERE rDateTime BETWEEN ? AND ?", (from_date_utc.format('YYYY-MM-DD HH:mm'), to_date_utc.format('YYYY-MM-DD HH:mm')))
humidities = curs.fetchall()
conn.close()
return [temperatures, humidities, timezone, from_date_str, to_date_str]
@app.route("/to_plotly", methods=['GET']) #This method will send the data to ploty.
def to_plotly():
import plotly.plotly as py
from plotly.graph_objs import *
temperatures, humidities, timezone, from_date_str, to_date_str = get_records()
# Create new record tables so that datetimes are adjusted back to the user browser's time zone.
time_series_adjusted_tempreratures = []
time_series_adjusted_humidities = []
time_series_temprerature_values = []
time_series_humidity_values = []
for record in temperatures:
local_timedate = arrow.get(record[0], "YYYY-MM-DD HH:mm").to(timezone)
time_series_adjusted_tempreratures.append(local_timedate.format('YYYY-MM-DD HH:mm'))
time_series_temprerature_values.append(round(record[2],2))
for record in humidities:
local_timedate = arrow.get(record[0], "YYYY-MM-DD HH:mm").to(timezone)
time_series_adjusted_humidities.append(local_timedate.format('YYYY-MM-DD HH:mm')) #Best to pass datetime in text
#so that Plotly respects it
time_series_humidity_values.append(round(record[2],2))
temp = Scatter(
x=time_series_adjusted_tempreratures,
y=time_series_temprerature_values,
name='Temperature'
)
hum = Scatter(
x=time_series_adjusted_humidities,
y=time_series_humidity_values,
name='Humidity',
yaxis='y2'
)
data = Data([temp, hum])
layout = Layout(
title="Temperature and humidity in Peter's lab",
xaxis=XAxis(
type='date',
autorange=True
),
yaxis=YAxis(
title='Celcius',
type='linear',
autorange=True
),
yaxis2=YAxis(
title='Percent',
type='linear',
autorange=True,
overlaying='y',
side='right'
)
)
fig = Figure(data=data, layout=layout)
plot_url = py.plot(fig, filename='lab_temp_hum')
return plot_url
def validate_date(d):
try:
datetime.datetime.strptime(d, '%Y-%m-%d %H:%M')
return True
except ValueError:
return False
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080) | mit | 111,837,944,110,198,480 | 37.843931 | 159 | 0.651883 | false | 2.865245 | false | false | false |
michaelhkw/incubator-impala | tests/comparison/query_profile.py | 1 | 30490 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from logging import getLogger
from random import choice, randint, random, shuffle
from tests.comparison.db_types import (
Boolean,
Char,
Decimal,
Float,
Int,
TYPES,
Timestamp)
from tests.comparison.query import (
InsertClause,
InsertStatement,
Query,
StatementExecutionMode,
ValuesClause)
from tests.comparison.funcs import (
AnalyticAvg,
AnalyticCount,
AnalyticFirstValue,
AnalyticLag,
AnalyticLastValue,
AnalyticLead,
AnalyticMax,
AnalyticMin,
AnalyticSum,
And,
Coalesce,
Equals,
GreaterThan,
GreaterThanOrEquals,
If,
In,
IsDistinctFrom,
IsNotDistinctFrom,
IsNotDistinctFromOp,
LessThan,
LessThanOrEquals,
NotEquals,
NotIn,
Or,
WindowBoundary)
from tests.comparison.random_val_generator import RandomValGenerator
UNBOUNDED_PRECEDING = WindowBoundary.UNBOUNDED_PRECEDING
PRECEDING = WindowBoundary.PRECEDING
CURRENT_ROW = WindowBoundary.CURRENT_ROW
FOLLOWING = WindowBoundary.FOLLOWING
UNBOUNDED_FOLLOWING = WindowBoundary.UNBOUNDED_FOLLOWING
LOG = getLogger()
class DefaultProfile(object):
def __init__(self):
# Bounds are (min, max) values, the actual value used will be selected from the
# bounds and each value within the range has an equal probability of being selected.
self._bounds = {
'MAX_NESTED_QUERY_COUNT': (0, 2),
'MAX_NESTED_EXPR_COUNT': (0, 2),
'SELECT_ITEM_COUNT': (1, 5),
'WITH_TABLE_COUNT': (1, 3),
'TABLE_COUNT': (1, 2),
'ANALYTIC_LEAD_LAG_OFFSET': (1, 100),
'ANALYTIC_WINDOW_OFFSET': (1, 100),
'INSERT_VALUES_ROWS': (1, 10)}
# Below are interdependent weights used to determine probabilities. The probability
# of any item being selected should be (item weight) / sum(weights). A weight of
# zero means the item will never be selected.
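    # For example, with the SELECT_ITEM_CATEGORY weights below
    # (AGG=3, ANALYTIC=1, BASIC=10), a basic select item is chosen with
    # probability 10 / (3 + 1 + 10), i.e. roughly 71% of the time.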
self._weights = {
'SELECT_ITEM_CATEGORY': {
'AGG': 3,
'ANALYTIC': 1,
'BASIC': 10},
'TYPES': {
Boolean: 1,
Char: 1,
Decimal: 1,
Float: 1,
Int: 10,
Timestamp: 1},
'RELATIONAL_FUNCS': {
# The weights below are "best effort" suggestions. Because QueryGenerator
# prefers to set column types first, and some functions are "supported" only
# by some types, it means functions can be pruned off from this dictionary,
# and that will shift the probabilities. A quick example if that if a Char
# column is chosen: LessThan may not have a pre-defined signature for Char
# comparison, so LessThan shouldn't be chosen with Char columns. The
# tendency to prune will shift as the "funcs" module is adjusted to
# add/remove signatures.
And: 2,
Coalesce: 2,
Equals: 40,
GreaterThan: 2,
GreaterThanOrEquals: 2,
In: 2,
If: 2,
IsDistinctFrom: 2,
IsNotDistinctFrom: 1,
IsNotDistinctFromOp: 1,
LessThan: 2,
LessThanOrEquals: 2,
NotEquals: 2,
NotIn: 2,
Or: 2},
'CONJUNCT_DISJUNCTS': {
# And and Or appear both under RELATIONAL_FUNCS and CONJUNCT_DISJUNCTS for the
# following reasons:
# 1. And and Or are considered "relational" by virtue of taking two arguments
# and returning a Boolean. The crude signature selection means they could be
# selected, so we describe weights there.
# 2. They are set here explicitly as well so that
# QueryGenerator._create_bool_func_tree() can create a "more realistic"
# expression that has a Boolean operator at the top of the tree by explicitly
# asking for an And or Or.
# IMPALA-3896 tracks a better way to do this.
And: 5,
Or: 1},
'ANALYTIC_WINDOW': {
('ROWS', UNBOUNDED_PRECEDING, None): 1,
('ROWS', UNBOUNDED_PRECEDING, PRECEDING): 2,
('ROWS', UNBOUNDED_PRECEDING, CURRENT_ROW): 1,
('ROWS', UNBOUNDED_PRECEDING, FOLLOWING): 2,
('ROWS', UNBOUNDED_PRECEDING, UNBOUNDED_FOLLOWING): 2,
('ROWS', PRECEDING, None): 1,
('ROWS', PRECEDING, PRECEDING): 2,
('ROWS', PRECEDING, CURRENT_ROW): 1,
('ROWS', PRECEDING, FOLLOWING): 2,
('ROWS', PRECEDING, UNBOUNDED_FOLLOWING): 2,
('ROWS', CURRENT_ROW, None): 1,
('ROWS', CURRENT_ROW, CURRENT_ROW): 1,
('ROWS', CURRENT_ROW, FOLLOWING): 2,
('ROWS', CURRENT_ROW, UNBOUNDED_FOLLOWING): 2,
('ROWS', FOLLOWING, FOLLOWING): 2,
('ROWS', FOLLOWING, UNBOUNDED_FOLLOWING): 2,
# Ranges not yet supported
('RANGE', UNBOUNDED_PRECEDING, None): 0,
('RANGE', UNBOUNDED_PRECEDING, PRECEDING): 0,
('RANGE', UNBOUNDED_PRECEDING, CURRENT_ROW): 0,
('RANGE', UNBOUNDED_PRECEDING, FOLLOWING): 0,
('RANGE', UNBOUNDED_PRECEDING, UNBOUNDED_FOLLOWING): 0,
('RANGE', PRECEDING, None): 0,
('RANGE', PRECEDING, PRECEDING): 0,
('RANGE', PRECEDING, CURRENT_ROW): 0,
('RANGE', PRECEDING, FOLLOWING): 0,
('RANGE', PRECEDING, UNBOUNDED_FOLLOWING): 0,
('RANGE', CURRENT_ROW, None): 0,
('RANGE', CURRENT_ROW, CURRENT_ROW): 0,
('RANGE', CURRENT_ROW, FOLLOWING): 0,
('RANGE', CURRENT_ROW, UNBOUNDED_FOLLOWING): 0,
('RANGE', FOLLOWING, FOLLOWING): 0,
('RANGE', FOLLOWING, UNBOUNDED_FOLLOWING): 0},
'JOIN': {
'INNER': 90,
'LEFT': 30,
'RIGHT': 10,
'FULL_OUTER': 3,
'CROSS': 1},
'SUBQUERY_PREDICATE': {
('Exists', 'AGG', 'CORRELATED'): 0, # Not supported
('Exists', 'AGG', 'UNCORRELATED'): 1,
('Exists', 'NON_AGG', 'CORRELATED'): 1,
('Exists', 'NON_AGG', 'UNCORRELATED'): 1,
('NotExists', 'AGG', 'CORRELATED'): 0, # Not supported
('NotExists', 'AGG', 'UNCORRELATED'): 0, # Not supported
('NotExists', 'NON_AGG', 'CORRELATED'): 1,
('NotExists', 'NON_AGG', 'UNCORRELATED'): 0, # Not supported
('In', 'AGG', 'CORRELATED'): 0, # Not supported
('In', 'AGG', 'UNCORRELATED'): 0, # Not supported
('In', 'NON_AGG', 'CORRELATED'): 1,
('In', 'NON_AGG', 'UNCORRELATED'): 1,
('NotIn', 'AGG', 'CORRELATED'): 0, # Not supported
('NotIn', 'AGG', 'UNCORRELATED'): 1,
('NotIn', 'NON_AGG', 'CORRELATED'): 1,
('NotIn', 'NON_AGG', 'UNCORRELATED'): 1,
('Scalar', 'AGG', 'CORRELATED'): 0, # Not supported
('Scalar', 'AGG', 'UNCORRELATED'): 1,
('Scalar', 'NON_AGG', 'CORRELATED'): 0, # Not supported
('Scalar', 'NON_AGG', 'UNCORRELATED'): 1},
'QUERY_EXECUTION': { # Used by the discrepancy searcher
StatementExecutionMode.CREATE_TABLE_AS: 1,
StatementExecutionMode.CREATE_VIEW_AS: 1,
StatementExecutionMode.SELECT_STATEMENT: 10},
'STATEMENT': {
# TODO: Eventually make this a mix of DML and SELECT (IMPALA-4601)
Query: 1},
'INSERT_SOURCE_CLAUSE': {
Query: 3,
ValuesClause: 1},
'INSERT_COLUMN_LIST': {
'partial': 3,
'none': 1},
'VALUES_ITEM_EXPR': {
'constant': 1,
'function': 2},
'INSERT_UPSERT': {
InsertClause.CONFLICT_ACTION_IGNORE: 1,
InsertClause.CONFLICT_ACTION_UPDATE: 3}}
# On/off switches
self._flags = {
'ANALYTIC_DESIGNS': {
'TOP_LEVEL_QUERY_WITHOUT_LIMIT': True,
'DETERMINISTIC_ORDER_BY': True,
'NO_ORDER_BY': True,
'ONLY_SELECT_ITEM': True,
'UNBOUNDED_WINDOW': True,
'RANK_FUNC': True}}
# Independent probabilities where 1 means 100%. These values may be ignored depending
# on the context. For example, GROUP_BY is almost always ignored and instead
# determined by the SELECT item weights above, since mixing aggregate and
# non-aggregate items requires the use of a GROUP BY. The GROUP_BY option below is
# only applied if all of the SELECT items are non-aggregate.
self._probabilities = {
'OPTIONAL_QUERY_CLAUSES': {
'WITH': 0.1, # MAX_NESTED_QUERY_COUNT bounds take precedence
'FROM': 1,
'WHERE': 0.5,
'GROUP_BY': 0.1, # special case, doesn't really do much, see comment above
'HAVING': 0.25,
'UNION': 0.1,
'ORDER_BY': 0.1},
'OPTIONAL_ANALYTIC_CLAUSES': {
'PARTITION_BY': 0.5,
'ORDER_BY': 0.5,
'WINDOW': 0.5}, # will only be used if ORDER BY is chosen
'MISC': {
'INLINE_VIEW': 0.1, # MAX_NESTED_QUERY_COUNT bounds take precedence
'SELECT_DISTINCT': 0.1,
'SCALAR_SUBQUERY': 0.1,
'ONLY_USE_EQUALITY_JOIN_PREDICATES': 0.8,
'ONLY_USE_AGGREGATES_IN_HAVING_CLAUSE': 0.7,
'UNION_ALL': 0.5}} # Determines use of "ALL" but not "UNION"
self.__type_weights = {}
self.constant_generator = RandomValGenerator()
def _get_config_value(self, start_config, *keys):
value = start_config
for key in keys:
value = value[key]
return value
def weights(self, *keys):
'''Convenience method for getting the values of named weights'''
return self._get_config_value(self._weights, *keys)
def bounds(self, *keys):
'''Convenience method for getting the values of named bounds'''
return self._get_config_value(self._bounds, *keys)
def probability(self, *keys):
'''Convenience method for getting the value of named probabilities'''
return self._get_config_value(self._probabilities, *keys)
def _choose_from_bounds(self, *bounds):
'''Returns a value that is within the given bounds. Each value has an equal chance
of being chosen.
'''
if isinstance(bounds[0], str):
lower, upper = self.bounds(*bounds)
else:
lower, upper = bounds
return randint(lower, upper)
def _choose_from_weights(self, *weight_args):
'''Returns a value that is selected from the keys of weights with the probability
determined by the values of weights.
'''
if isinstance(weight_args[0], str):
weights = self.weights(*weight_args)
else:
weights = weight_args[0]
total_weight = sum(weights.itervalues())
numeric_choice = randint(1, total_weight)
for choice_, weight in weights.iteritems():
if weight <= 0:
continue
if numeric_choice <= weight:
return choice_
numeric_choice -= weight
def _choose_from_filtered_weights(self, filter, *weights):
'''Convenience method, apply the given filter before choosing a value.'''
if isinstance(weights[0], str):
weights = self.weights(*weights)
else:
weights = weights[0]
return self._choose_from_weights(dict((choice_, weight) for choice_, weight
in weights.iteritems() if filter(choice_)))
def _decide_from_probability(self, *keys):
return random() < self.probability(*keys)
def get_max_nested_query_count(self):
'''Return the maximum number of queries the top level query may contain.'''
return self._choose_from_bounds('MAX_NESTED_QUERY_COUNT')
def use_with_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'WITH')
def only_use_equality_join_predicates(self):
return self._decide_from_probability('MISC', 'ONLY_USE_EQUALITY_JOIN_PREDICATES')
def only_use_aggregates_in_having_clause(self):
return self._decide_from_probability('MISC', 'ONLY_USE_AGGREGATES_IN_HAVING_CLAUSE')
def get_with_clause_table_ref_count(self):
'''Return the number of table ref entries a WITH clause should contain.'''
return self._choose_from_bounds('WITH_TABLE_COUNT')
def get_select_item_count(self):
return self._choose_from_bounds('SELECT_ITEM_COUNT')
def choose_nested_expr_count(self):
return self._choose_from_bounds('MAX_NESTED_EXPR_COUNT')
def allowed_analytic_designs(self):
return [design for design, is_enabled in self._flags['ANALYTIC_DESIGNS'].iteritems()
if is_enabled]
def use_partition_by_clause_in_analytic(self):
return self._decide_from_probability('OPTIONAL_ANALYTIC_CLAUSES', 'PARTITION_BY')
def use_order_by_clause_in_analytic(self):
return self._decide_from_probability('OPTIONAL_ANALYTIC_CLAUSES', 'ORDER_BY')
def use_window_in_analytic(self):
return self._decide_from_probability('OPTIONAL_ANALYTIC_CLAUSES', 'WINDOW')
def choose_window_type(self):
return self._choose_from_weights('ANALYTIC_WINDOW')
def get_window_offset(self):
return self._choose_from_bounds('ANALYTIC_WINDOW_OFFSET')
def get_offset_for_analytic_lead_or_lag(self):
return self._choose_from_bounds('ANALYTIC_LEAD_LAG_OFFSET')
def get_table_count(self):
return self._choose_from_bounds('TABLE_COUNT')
def use_inline_view(self):
return self._decide_from_probability('MISC', 'INLINE_VIEW')
def choose_table(self, table_exprs):
return choice(table_exprs)
def choose_join_type(self, join_types):
return self._choose_from_filtered_weights(
lambda join_type: join_type in join_types, 'JOIN')
def choose_join_condition_count(self):
return max(1, self._choose_from_bounds('MAX_NESTED_EXPR_COUNT'))
def use_where_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'WHERE')
def use_scalar_subquery(self):
return self._decide_from_probability('MISC', 'SCALAR_SUBQUERY')
def choose_subquery_predicate_category(self, func_name, allow_correlated):
weights = self.weights('SUBQUERY_PREDICATE')
func_names = set(name for name, _, _ in weights.iterkeys())
if func_name not in func_names:
func_name = 'Scalar'
allow_agg = self.weights('SELECT_ITEM_CATEGORY').get('AGG', 0)
if allow_correlated and self.bounds('TABLE_COUNT')[1] == 0:
allow_correlated = False
weights = dict(((name, use_agg, use_correlated), weight)
for (name, use_agg, use_correlated), weight in weights.iteritems()
if name == func_name and
(allow_agg or use_agg == 'NON_AGG') and
weight)
if weights:
return self._choose_from_weights(weights)
def use_distinct(self):
return self._decide_from_probability('MISC', 'SELECT_DISTINCT')
def use_distinct_in_func(self):
return self._decide_from_probability('MISC', 'SELECT_DISTINCT')
def use_group_by_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'GROUP_BY')
def use_having_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'HAVING')
def use_union_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'UNION')
def use_union_all(self):
return self._decide_from_probability('MISC', 'UNION_ALL')
def get_query_execution(self):
return self._choose_from_weights('QUERY_EXECUTION')
def use_having_without_groupby(self):
return True
def use_nested_with(self):
return True
def use_lateral_join(self):
return False
def use_boolean_expr_for_lateral_join(self):
return False
def get_num_boolean_exprs_for_lateral_join(self):
return False
# Workaround for Hive null ordering differences, and lack of 'NULL FIRST', 'NULL LAST'
# specifications. The ref db will order nulls as specified for ASC sorting to make it
# identifical to Hive. Valid return values are: 'BEFORE', 'AFTER', or 'DEFAULT',
# the latter means no specification needed.
def nulls_order_asc(self):
return 'DEFAULT'
def choose_val_expr(self, val_exprs, types=TYPES):
if not val_exprs:
      raise Exception('At least one value is required')
if not types:
raise Exception('At least one type is required')
available_types = set(types) & set(val_exprs.by_type)
if not available_types:
raise Exception('None of the provided values return any of the required types')
val_type = self.choose_type(available_types)
return choice(val_exprs.by_type[val_type])
def choose_constant(self, return_type=None, allow_null=True):
if not return_type:
return_type = self.choose_type()
while True:
val = self.constant_generator.generate_val(return_type)
if val is None and not allow_null:
continue
return return_type(val)
def choose_type(self, types=TYPES):
type_weights = self.weights('TYPES')
weights = dict((type_, type_weights[type_]) for type_ in types)
if not weights:
raise Exception('None of the requested types are enabled')
return self._choose_from_weights(weights)
def choose_conjunct_disjunct_fill_ratio(self):
'''Return the ratio of ANDs and ORs to use in a boolean function tree. For example,
when creating a WHERE condition that consists of 10 nested functions, a ratio of
0.1 means 1 out of the 10 functions in the WHERE clause will be an AND or OR.
'''
return random() * random()
def choose_relational_func_fill_ratio(self):
'''Return the ratio of relational functions to use in a boolean function tree. This
ratio is applied after 'choose_conjunct_disjunct_fill_ratio()'.
'''
return random() * random()
def choose_conjunct_disjunct(self):
return self._choose_from_weights('CONJUNCT_DISJUNCTS')
def choose_relational_func_signature(self, signatures):
'''Return a relational signature chosen from "signatures". A signature is considered
to be relational if it returns a boolean and accepts more than one argument.
'''
if not signatures:
raise Exception('At least one signature is required')
filtered_signatures = filter(
lambda s: s.return_type == Boolean \
and len(s.args) > 1 \
and not any(a.is_subquery for a in s.args),
signatures)
if not filtered_signatures:
raise Exception(
'None of the provided signatures corresponded to a relational function')
func_weights = self.weights('RELATIONAL_FUNCS')
missing_funcs = set(s.func for s in filtered_signatures) - set(func_weights)
if missing_funcs:
raise Exception("Weights are missing for functions: {0}".format(missing_funcs))
return self.choose_func_signature(filtered_signatures,
self.weights('RELATIONAL_FUNCS'))
def choose_func_signature(self, signatures, _func_weights=None):
'''Return a signature chosen from "signatures".'''
if not signatures:
raise Exception('At least one signature is required')
type_weights = self.weights('TYPES')
func_weights = _func_weights
if func_weights:
distinct_funcs_in_signatures = set([s.func for s in signatures])
pruned_func_weights = {f: func_weights[f] for f in distinct_funcs_in_signatures}
func_weights = pruned_func_weights
else:
# First a function will be chosen then a signature. This is done so that the number
# of signatures a function has doesn't influence its likelihood of being chosen.
# Functions will be weighted based on the weight of the types in their arguments.
# The weights will be normalized by the number of arguments in the signature. The
# weight of a function will be the maximum weight out of all of it's signatures.
# If any signature has a type with a weight of zero, the signature will not be used.
#
# Example: type_weights = {Int: 10, Float: 1},
# funcs = [foo(Int), foo(Float), bar(Int, Float)]
#
# max signature length = 2 # from bar(Int, Float)
# weight of foo(Int) = (10 * 2)
# weight of foo(Float) = (1 * 2)
# weight of bar(Int, Float) = ((10 + 1) * 1)
# func_weights = {foo: 20, bar: 11}
#
# Note that this only selects a function, the function signature will be selected
# later. This is done to prevent function with a greater number of signatures from
# being selected more frequently.
func_weights = dict()
# The length of the signature in func_weights
signature_length_by_func = dict()
for signature in signatures:
signature_weight = type_weights[signature.return_type]
signature_length = 1
for arg in signature.args:
if arg.is_subquery:
for subtype in arg.type:
signature_weight *= type_weights[subtype]
signature_length += 1
else:
signature_weight *= type_weights[arg.type]
signature_length += 1
if not signature_weight:
continue
if (signature.func not in func_weights or
signature_weight > func_weights[signature.func]):
func_weights[signature.func] = signature_weight
signature_length_by_func[signature.func] = signature_length
if not func_weights:
raise Exception('All functions disallowed based on signature types')
distinct_signature_lengths = set(signature_length_by_func.values())
for func, weight in func_weights.iteritems():
signature_length = signature_length_by_func[func]
func_weights[func] = reduce(
lambda x, y: x * y,
distinct_signature_lengths - set([signature_length]),
func_weights[func])
func = self._choose_from_weights(func_weights)
# Same idea as above but for the signatures of the selected function.
signature_weights = dict()
signature_lengths = dict()
for idx, signature in enumerate(func.signatures()):
if signature not in signatures:
continue
signature_weight = type_weights[signature.return_type]
signature_length = 1
for arg in signature.args:
if arg.is_subquery:
for subtype in arg.type:
signature_weight *= type_weights[subtype]
signature_length += 1
else:
signature_weight *= type_weights[arg.type]
signature_length += 1
if signature_weight:
signature_weights[idx] = signature_weight
signature_lengths[idx] = signature_length
distinct_signature_lengths = set(signature_lengths.values())
for idx, weight in signature_weights.iteritems():
signature_length = signature_lengths[idx]
signature_weights[idx] = reduce(
lambda x, y: x * y,
distinct_signature_lengths - set([signature_length]),
signature_weights[idx])
idx = self._choose_from_weights(signature_weights)
return func.signatures()[idx]
def allow_func_signature(self, signature):
weights = self.weights('TYPES')
if not weights[signature.return_type]:
return False
for arg in signature.args:
if arg.is_subquery:
if not all(weights[subtype] for subtype in arg.type):
return False
elif not weights[arg.type]:
return False
return True
def get_allowed_join_signatures(self, signatures):
"""
Returns all the function signatures that are allowed inside a JOIN clause. This
method is mutually exclusive with only_use_equality_join_predicates. This results of
this method are ignored if only_use_equality_join_predicates return True.
"""
return signatures
def is_non_equality_join_predicate(self, func):
"""
Returns True if the given func is considered a non-equality join condition.
"""
return func in (GreaterThan, GreaterThanOrEquals, In,
IsNotDistinctFrom, IsNotDistinctFromOp, LessThan,
LessThanOrEquals, NotEquals, NotIn)
def get_analytic_funcs_that_cannot_contain_aggs(self):
"""
Returns a list of analytic functions that should not contain aggregate functions
"""
return None
def choose_statement(self):
return self._choose_from_weights('STATEMENT')
def choose_insert_source_clause(self):
"""
Returns whether we generate an INSERT/UPSERT SELECT or an INSERT/UPSERT VALUES
"""
return self._choose_from_weights('INSERT_SOURCE_CLAUSE')
def choose_insert_column_list(self, table):
"""
Decide whether or not an INSERT/UPSERT will be in the form of:
INSERT/UPSERT INTO table SELECT|VALUES ...
or
INSERT/UPSERT INTO table (col1, col2, ...) SELECT|VALUES ...
If the second form, the column list is shuffled. The column list will always contain
the primary key columns and between 0 and all additional columns.
"""
if 'partial' == self._choose_from_weights('INSERT_COLUMN_LIST'):
columns_to_insert = list(table.primary_keys)
min_additional_insert_cols = 0 if columns_to_insert else 1
remaining_columns = [col for col in table.cols if not col.is_primary_key]
shuffle(remaining_columns)
additional_column_count = randint(min_additional_insert_cols,
len(remaining_columns))
columns_to_insert.extend(remaining_columns[:additional_column_count])
shuffle(columns_to_insert)
return columns_to_insert
else:
return None
def choose_insert_values_row_count(self):
"""
Choose the number of rows to insert in an INSERT/UPSERT VALUES
"""
return self._choose_from_bounds('INSERT_VALUES_ROWS')
def choose_values_item_expr(self):
"""
For a VALUES clause, Choose whether a particular item in a particular row will be a
constant or a function.
"""
return self._choose_from_weights('VALUES_ITEM_EXPR')
def choose_insert_vs_upsert(self):
"""
Choose whether a particular insertion-type statement will be INSERT or UPSERT.
"""
return self._choose_from_weights('INSERT_UPSERT')
class ImpalaNestedTypesProfile(DefaultProfile):
def __init__(self):
super(ImpalaNestedTypesProfile, self).__init__()
self._probabilities['OPTIONAL_QUERY_CLAUSES']['WITH'] = 0.3
self._probabilities['MISC']['INLINE_VIEW'] = 0.3
def use_lateral_join(self):
return random() < 0.5
def use_boolean_expr_for_lateral_join(self):
return random() < 0.2
def get_num_boolean_exprs_for_lateral_join(self):
if random() < 0.8:
return 0
result = 1
while random() < 0.6:
result += 1
return result
def get_table_count(self):
num = 1
while random() < (0.85 ** num):
num += 1
return num
# This profile was added for ad-hoc testing.
class TestFunctionProfile(DefaultProfile):
def choose_func_signature(self, signatures):
if not signatures:
raise Exception('At least one signature is required')
preferred_signatures = filter(lambda s: "DistinctFrom" in s.func._NAME, signatures)
if preferred_signatures:
signatures = preferred_signatures
return super(TestFunctionProfile, self).choose_func_signature(signatures)
class HiveProfile(DefaultProfile):
def __init__(self):
super(HiveProfile, self).__init__()
self._probabilities['MISC']['ONLY_USE_EQUALITY_JOIN_PREDICATES'] = 0
def use_having_without_groupby(self):
return False
def use_nested_with(self):
return False
def nulls_order_asc(self):
return 'BEFORE'
def allow_func_signature(self, signature):
if signature.func._NAME.startswith('DateAdd'):
return False
if signature.func._NAME in ('Greatest', 'Least'):
type = signature.return_type
argtypes = [arg.type for arg in signature.args]
for argtype in argtypes:
if type is None:
type = argtype
continue
else:
if type != argtype:
return False
return DefaultProfile.allow_func_signature(self, signature)
def get_allowed_join_signatures(self, signatures):
"""
Restricts the function signatures inside a JOIN clause to either be an Equals
operator, an And operator, or any operator that only takes in one argument. The reason
is that Hive only supports equi-joins, does not allow OR operators inside a JOIN, and
does not allow any other operator that operates over multiple columns.
The reason ONLY_USE_EQUALITY_JOIN_PREDICATES is not sufficient to guarantee this is
that Hive needs to restrict the functions used based on the argument size of a
function.
"""
return [signature for signature in signatures if
signature.func in (Equals, And) or len(signature.args) == 1]
def get_analytic_funcs_that_cannot_contain_aggs(self):
"""
Hive does not support aggregate functions inside AVG, COUNT, FIRSTVALUE, LAG,
LASTVALUE, LEAD, MAX, MIN, or SUM functions
"""
return (AnalyticAvg, AnalyticCount, AnalyticFirstValue, AnalyticLag,
AnalyticLastValue, AnalyticLead, AnalyticMax, AnalyticMin, AnalyticSum)
class DMLOnlyProfile(DefaultProfile):
"""
Profile that only executes DML statements
TODO: This will be useful for testing DML; eventually this should be folded into the
default profile. (IMPALA-4601)
"""
def __init__(self):
super(DMLOnlyProfile, self).__init__()
self._weights.update({
'STATEMENT': {
InsertStatement: 1}})
PROFILES = [var for var in locals().values()
if isinstance(var, type) and var.__name__.endswith('Profile')]
| apache-2.0 | 5,076,192,376,453,511,000 | 37.546144 | 90 | 0.638373 | false | 3.812203 | false | false | false |
karlch/vimiv | vimiv/thumbnail_manager.py | 1 | 10586 | # vim: ft=python fileencoding=utf-8 sw=4 et sts=4
"""Provides classes to store and load thumbnails from a shared cache.
The ThumbnailStore transparently creates and loads thumbnails according to the
freedesktop.org thumbnail management standard.
The ThumbnailManager provides a asynchronous mechanism to load thumbnails from
the store.
If possible, you should avoid using the store directly but use the manager
instead.
"""
import collections
import hashlib
import os
import tempfile
from multiprocessing.pool import ThreadPool as Pool
from gi._error import GError
from gi.repository import GdkPixbuf, GLib, Gtk
from gi.repository.GdkPixbuf import Pixbuf
from vimiv.helpers import get_user_cache_dir
ThumbTuple = collections.namedtuple('ThumbTuple', ['original', 'thumbnail'])
class ThumbnailManager:
"""Provides an asynchronous mechanism to load thumbnails.
Attributes:
thumbnail_store: ThumbnailStore class with the loading mechanism.
large: The thumbnail managing standard specifies two thumbnail sizes.
256x256 (large) and 128x128 (normal)
default_icon: Default icon if thumbnails are not yet loaded.
error_icon: The path to the icon which is used, when thumbnail creation
fails.
"""
_cpu_count = os.cpu_count()
if _cpu_count is None:
_cpu_count = 1
elif _cpu_count > 1:
_cpu_count -= 1
_thread_pool = Pool(_cpu_count)
_cache = {}
def __init__(self, large=True):
"""Construct a new ThumbnailManager.
Args:
large: Size of thumbnails that are created. If true 256x256 else
128x128.
"""
super(ThumbnailManager, self).__init__()
self.thumbnail_store = ThumbnailStore(large=large)
# Default icon if thumbnail creation fails
icon_theme = Gtk.IconTheme.get_default()
self.error_icon = icon_theme.lookup_icon("dialog-error", 256,
0).get_filename()
self.default_icon = icon_theme.lookup_icon("image-x-generic", 256,
0).get_filename()
def _do_get_thumbnail_at_scale(self, source_file, size, callback, index,
ignore_cache=False):
if not ignore_cache and source_file in self._cache:
pixbuf = self._cache[source_file]
else:
thumbnail_path = self.thumbnail_store.get_thumbnail(source_file,
ignore_cache)
if thumbnail_path is None:
thumbnail_path = self.error_icon
pixbuf = Pixbuf.new_from_file(thumbnail_path)
self._cache[source_file] = pixbuf
        if pixbuf.get_height() != size and pixbuf.get_width() != size:
pixbuf = self.scale_pixbuf(pixbuf, size)
return callback, pixbuf, index
@staticmethod
def scale_pixbuf(pixbuf, size):
"""Scale the pixbuf to the given size keeping the aspect ratio.
Either the width or the height of the returned pixbuf is `size` large,
depending on the aspect ratio.
Args:
pixbuf: The pixbuf to scale
size: The size of the new width or height
Return:
The scaled pixbuf.
"""
width = size
height = size
ratio = pixbuf.get_width() / pixbuf.get_height()
if ratio > 1:
height /= ratio
else:
width *= ratio
pixbuf = pixbuf.scale_simple(width, height,
GdkPixbuf.InterpType.BILINEAR)
return pixbuf
@staticmethod
def _do_callback(result):
GLib.idle_add(*result)
def get_thumbnail_at_scale_async(self, filename, size, callback, index,
ignore_cache=False):
"""Create the thumbnail for 'filename' and return it via 'callback'.
Creates the thumbnail for the given filename at the given size and
then calls the given callback function with the resulting pixbuf.
Args:
filename: The filename to get the thumbnail for
size: The size the returned pixbuf is scaled to
callback: A callable of form callback(pixbuf, *args)
args: Any additional arguments that can be passed to callback
ignore_cache: If true, the builtin in-memory cache is bypassed and
the thumbnail file is loaded from disk
"""
self._thread_pool.apply_async(self._do_get_thumbnail_at_scale,
(filename, size, callback, index,
ignore_cache),
callback=self._do_callback)
class ThumbnailStore(object):
"""Implements freedesktop.org's Thumbnail Managing Standard."""
KEY_URI = "Thumb::URI"
KEY_MTIME = "Thumb::MTime"
KEY_SIZE = "Thumb::Size"
KEY_WIDTH = "Thumb::Image::Width"
KEY_HEIGHT = "Thumb::Image::Height"
def __init__(self, large=True):
"""Construct a new ThumbnailStore.
Args:
large: Size of thumbnails that are created. If true 256x256 else
128x128.
"""
super(ThumbnailStore, self).__init__()
import vimiv
self.base_dir = os.path.join(get_user_cache_dir(), "thumbnails")
self.fail_dir = os.path.join(
self.base_dir, "fail", "vimiv-" + vimiv.__version__)
self.thumbnail_dir = ""
self.thumb_size = 0
self.use_large_thumbnails(large)
self._ensure_dirs_exist()
def use_large_thumbnails(self, enabled=True):
"""Specify whether this thumbnail store uses large thumbnails.
Large thumbnails have 256x256 pixels and non-large thumbnails 128x128.
Args:
enabled: If true large thumbnails will be used.
"""
if enabled:
self.thumbnail_dir = os.path.join(self.base_dir, "large")
self.thumb_size = 256
else:
self.thumbnail_dir = os.path.join(self.base_dir, "normal")
self.thumb_size = 128
def get_thumbnail(self, filename, ignore_current=False):
"""Get the path of the thumbnail of the given filename.
If the requested thumbnail does not yet exist, it will first be created
before returning its path.
Args:
filename: The filename to get the thumbnail for.
            ignore_current: If True, ignore saved thumbnails and force a
                recreation. Needed because transforming an image from within
                thumbnail mode can happen within the same second, in which
                case the mtime comparison alone would not detect the change.
Return:
The path of the thumbnail file or None if thumbnail creation failed.
"""
# Don't create thumbnails for thumbnail cache
if filename.startswith(self.base_dir):
return filename
thumbnail_filename = self._get_thumbnail_filename(filename)
thumbnail_path = self._get_thumbnail_path(thumbnail_filename)
if os.access(thumbnail_path, os.R_OK) \
and self._is_current(filename, thumbnail_path) \
and not ignore_current:
return thumbnail_path
fail_path = self._get_fail_path(thumbnail_filename)
if os.path.exists(fail_path):
# We already tried to create a thumbnail for the given file but
# failed; don't try again.
return None
if self._create_thumbnail(filename, thumbnail_filename):
return thumbnail_path
return None
def _ensure_dirs_exist(self):
os.makedirs(self.thumbnail_dir, 0o700, exist_ok=True)
os.makedirs(self.fail_dir, 0o700, exist_ok=True)
def _is_current(self, source_file, thumbnail_path):
source_mtime = str(self._get_source_mtime(source_file))
thumbnail_mtime = self._get_thumbnail_mtime(thumbnail_path)
return source_mtime == thumbnail_mtime
def _get_thumbnail_filename(self, filename):
uri = self._get_source_uri(filename)
return hashlib.md5(bytes(uri, "UTF-8")).hexdigest() + ".png"
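    # Example (hypothetical path): "/home/user/img.jpg" is first turned into
    # the URI "file:///home/user/img.jpg"; the thumbnail file name is then the
    # MD5 hex digest of that URI plus ".png", as required by the
    # freedesktop.org Thumbnail Managing Standard.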
@staticmethod
def _get_source_uri(filename):
return "file://" + os.path.abspath(os.path.expanduser(filename))
def _get_thumbnail_path(self, thumbnail_filename):
return os.path.join(self.thumbnail_dir, thumbnail_filename)
def _get_fail_path(self, thumbnail_filename):
return os.path.join(self.fail_dir, thumbnail_filename)
@staticmethod
def _get_source_mtime(src):
return int(os.path.getmtime(src))
def _get_thumbnail_mtime(self, thumbnail_path):
pixbuf = Pixbuf.new_from_file(thumbnail_path)
mtime = pixbuf.get_options()["tEXt::" + self.KEY_MTIME]
return mtime
def _create_thumbnail(self, source_file, thumbnail_filename):
# Cannot access source; create neither thumbnail nor fail file
if not os.access(source_file, os.R_OK):
return False
try:
image = Pixbuf.new_from_file_at_scale(source_file, self.thumb_size,
self.thumb_size, True)
dest_path = self._get_thumbnail_path(thumbnail_filename)
success = True
except GError:
image = Pixbuf.new(GdkPixbuf.Colorspace.RGB, False, 8, 1, 1)
dest_path = self._get_fail_path(thumbnail_filename)
success = False
width = 0
height = 0
try:
_, width, height = GdkPixbuf.Pixbuf.get_file_info(source_file)
except IOError:
pass
options = {
"tEXt::" + self.KEY_URI: str(self._get_source_uri(source_file)),
"tEXt::" + self.KEY_MTIME: str(self._get_source_mtime(source_file)),
"tEXt::" + self.KEY_SIZE: str(os.path.getsize(source_file))
}
if width > 0 and height > 0:
options["tEXt::" + self.KEY_WIDTH] = str(width)
options["tEXt::" + self.KEY_HEIGHT] = str(height)
# First create temporary file and then move it. This avoids problems
# with concurrent access of the thumbnail cache, since "move" is an
# atomic operation
handle, tmp_filename = tempfile.mkstemp(dir=self.base_dir)
os.close(handle)
os.chmod(tmp_filename, 0o600)
image.savev(tmp_filename, "png", list(options.keys()),
list(options.values()))
os.replace(tmp_filename, dest_path)
return success
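# Usage sketch for ThumbnailStore (the path below is only an example):
#
#     store = ThumbnailStore(large=True)
#     thumb = store.get_thumbnail("/home/user/Pictures/cat.jpg")
#     # `thumb` is the path of a PNG no larger than 256x256 in the "large"
#     # thumbnail cache, or None if thumbnail creation failed for this file.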
| mit | 974,612,254,628,089,200 | 35.885017 | 80 | 0.601833 | false | 4.171001 | false | false | false |
OpenKMIP/PyKMIP | kmip/pie/objects.py | 1 | 68704 | # Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from abc import abstractmethod
import sqlalchemy
from sqlalchemy import Column, event, ForeignKey, Integer, String, VARBINARY
from sqlalchemy import Boolean
from sqlalchemy.ext.associationproxy import association_proxy
import binascii
import six
from kmip.core import enums
from kmip.pie import sqltypes as sql
app_specific_info_map = sqlalchemy.Table(
"app_specific_info_map",
sql.Base.metadata,
sqlalchemy.Column(
"managed_object_id",
sqlalchemy.Integer,
sqlalchemy.ForeignKey(
"managed_objects.uid",
ondelete="CASCADE"
)
),
sqlalchemy.Column(
"app_specific_info_id",
sqlalchemy.Integer,
sqlalchemy.ForeignKey(
"app_specific_info.id",
ondelete="CASCADE"
)
)
)
object_group_map = sqlalchemy.Table(
"object_group_map",
sql.Base.metadata,
sqlalchemy.Column(
"managed_object_id",
sqlalchemy.Integer,
sqlalchemy.ForeignKey(
"managed_objects.uid",
ondelete="CASCADE"
)
),
sqlalchemy.Column(
"object_group_id",
sqlalchemy.Integer,
sqlalchemy.ForeignKey(
"object_groups.id",
ondelete="CASCADE"
)
)
)
class ManagedObject(sql.Base):
"""
The abstract base class of the simplified KMIP object hierarchy.
A ManagedObject is a core KMIP object that is the subject of key
management operations. It contains various attributes that are common to
all types of ManagedObjects, including keys, certificates, and various
types of secret or sensitive data.
For more information, see Section 2.2 of the KMIP 1.1 specification.
Attributes:
value: The value of the ManagedObject. Type varies, usually bytes.
unique_identifier: The string ID of the ManagedObject.
names: A list of names associated with the ManagedObject.
object_type: An enumeration associated with the type of ManagedObject.
"""
__tablename__ = 'managed_objects'
unique_identifier = Column('uid', Integer, primary_key=True)
_object_type = Column('object_type', sql.EnumType(enums.ObjectType))
_class_type = Column('class_type', String(50))
value = Column('value', VARBINARY(1024))
name_index = Column(Integer, default=0)
_names = sqlalchemy.orm.relationship(
"ManagedObjectName",
back_populates="mo",
cascade="all, delete-orphan",
order_by="ManagedObjectName.id"
)
names = association_proxy('_names', 'name')
operation_policy_name = Column(
'operation_policy_name',
String(50),
default='default'
)
sensitive = Column("sensitive", Boolean, default=False)
initial_date = Column(Integer, default=0)
_owner = Column('owner', String(50), default=None)
app_specific_info = sqlalchemy.orm.relationship(
"ApplicationSpecificInformation",
secondary=app_specific_info_map,
back_populates="managed_objects",
order_by="ApplicationSpecificInformation.id",
passive_deletes=True
)
object_groups = sqlalchemy.orm.relationship(
"ObjectGroup",
secondary=object_group_map,
back_populates="managed_objects",
order_by="ObjectGroup.id",
passive_deletes=True
)
__mapper_args__ = {
'polymorphic_identity': 'ManagedObject',
'polymorphic_on': _class_type
}
__table_args__ = {
'sqlite_autoincrement': True
}
@abstractmethod
def __init__(self):
"""
Create a ManagedObject.
"""
self.value = None
self.unique_identifier = None
self.name_index = 0
self.names = list()
self.operation_policy_name = None
self.initial_date = 0
self.sensitive = False
self._object_type = None
self._owner = None
# All remaining attributes are not considered part of the public API
# and are subject to change.
self._application_specific_informations = list()
self._contact_information = None
self._object_groups = list()
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._archive_date = None
self._last_change_date = None
@property
def object_type(self):
"""
Accessor and property definition for the object type attribute.
Returns:
ObjectType: An ObjectType enumeration that corresponds to the
class of the object.
"""
return self._object_type
@object_type.setter
def object_type(self, value):
"""
Set blocker for the object type attribute.
Raises:
AttributeError: Always raised to block setting of attribute.
"""
raise AttributeError("object type cannot be set")
@abstractmethod
def validate(self):
"""
Verify that the contents of the ManagedObject are valid.
"""
pass
@abstractmethod
def __repr__(self):
pass
@abstractmethod
def __str__(self):
pass
@abstractmethod
def __eq__(self, other):
pass
@abstractmethod
def __ne__(self, other):
pass
class CryptographicObject(ManagedObject):
"""
The abstract base class of all ManagedObjects related to cryptography.
A CryptographicObject is a core KMIP object that is the subject of key
management operations. It contains various attributes that are common to
all types of CryptographicObjects, including keys and certificates.
For more information, see Section 2.2 of the KMIP 1.1 specification.
Attributes:
cryptographic_usage_masks: A list of usage mask enumerations
describing how the CryptographicObject will be used.
"""
__tablename__ = 'crypto_objects'
unique_identifier = Column('uid', Integer,
ForeignKey('managed_objects.uid'),
primary_key=True)
cryptographic_usage_masks = Column('cryptographic_usage_mask',
sql.UsageMaskType)
state = Column('state', sql.EnumType(enums.State))
__mapper_args__ = {
'polymorphic_identity': 'CryptographicObject'
}
__table_args__ = {
'sqlite_autoincrement': True
}
@abstractmethod
def __init__(self):
"""
Create a CryptographicObject.
"""
super(CryptographicObject, self).__init__()
self.cryptographic_usage_masks = list()
self.state = enums.State.PRE_ACTIVE
# All remaining attributes are not considered part of the public API
# and are subject to change.
self._digests = list()
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._activation_date = None
self._compromise_date = None
self._compromise_occurrence_date = None
self._deactivation_date = None
self._destroy_date = None
self._fresh = None
self._lease_time = None
self._links = list()
self._revocation_reason = None
class Key(CryptographicObject):
"""
The abstract base class of all ManagedObjects that are cryptographic keys.
A Key is a core KMIP object that is the subject of key management
operations. It contains various attributes that are common to all types of
Keys, including symmetric and asymmetric keys.
For more information, see Section 2.2 of the KMIP 1.1 specification.
Attributes:
cryptographic_algorithm: A CryptographicAlgorithm enumeration defining
the algorithm the key should be used with.
cryptographic_length: An int defining the length of the key in bits.
key_format_type: A KeyFormatType enumeration defining the format of
the key value.
key_wrapping_data: A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
"""
__tablename__ = 'keys'
unique_identifier = Column('uid', Integer,
ForeignKey('crypto_objects.uid'),
primary_key=True)
cryptographic_algorithm = Column(
'cryptographic_algorithm', sql.EnumType(enums.CryptographicAlgorithm))
cryptographic_length = Column('cryptographic_length', Integer)
key_format_type = Column(
'key_format_type', sql.EnumType(enums.KeyFormatType))
# Key wrapping data fields
_kdw_wrapping_method = Column(
'_kdw_wrapping_method',
sql.EnumType(enums.WrappingMethod),
default=None
)
_kdw_eki_unique_identifier = Column(
'_kdw_eki_unique_identifier',
String,
default=None
)
_kdw_eki_cp_block_cipher_mode = Column(
'_kdw_eki_cp_block_cipher_mode',
sql.EnumType(enums.BlockCipherMode),
default=None
)
_kdw_eki_cp_padding_method = Column(
'_kdw_eki_cp_padding_method',
sql.EnumType(enums.PaddingMethod),
default=None
)
_kdw_eki_cp_hashing_algorithm = Column(
'_kdw_eki_cp_hashing_algorithm',
sql.EnumType(enums.HashingAlgorithm),
default=None
)
_kdw_eki_cp_key_role_type = Column(
'_kdw_eki_cp_key_role_type',
sql.EnumType(enums.KeyRoleType),
default=None
)
_kdw_eki_cp_digital_signature_algorithm = Column(
'_kdw_eki_cp_digital_signature_algorithm',
sql.EnumType(enums.DigitalSignatureAlgorithm),
default=None
)
_kdw_eki_cp_cryptographic_algorithm = Column(
'_kdw_eki_cp_cryptographic_algorithm',
sql.EnumType(enums.CryptographicAlgorithm),
default=None
)
_kdw_eki_cp_random_iv = Column(
'_kdw_eki_cp_random_iv',
Boolean,
default=None
)
_kdw_eki_cp_iv_length = Column(
'_kdw_eki_cp_iv_length',
Integer,
default=None
)
_kdw_eki_cp_tag_length = Column(
'_kdw_eki_cp_tag_length',
Integer,
default=None
)
_kdw_eki_cp_fixed_field_length = Column(
'_kdw_eki_cp_fixed_field_length',
Integer,
default=None
)
    _kdw_eki_cp_invocation_field_length = Column(
        '_kdw_eki_cp_invocation_field_length',
        Integer,
        default=None
    )
_kdw_eki_cp_counter_length = Column(
'_kdw_eki_cp_counter_length',
Integer,
default=None
)
_kdw_eki_cp_initial_counter_value = Column(
'_kdw_eki_cp_initial_counter_value',
Integer,
default=None
)
_kdw_mski_unique_identifier = Column(
'_kdw_mski_unique_identifier',
String,
default=None
)
_kdw_mski_cp_block_cipher_mode = Column(
'_kdw_mski_cp_block_cipher_mode',
sql.EnumType(enums.BlockCipherMode),
default=None
)
_kdw_mski_cp_padding_method = Column(
'_kdw_mski_cp_padding_method',
sql.EnumType(enums.PaddingMethod),
default=None
)
_kdw_mski_cp_hashing_algorithm = Column(
'_kdw_mski_cp_hashing_algorithm',
sql.EnumType(enums.HashingAlgorithm),
default=None
)
_kdw_mski_cp_key_role_type = Column(
'_kdw_mski_cp_key_role_type',
sql.EnumType(enums.KeyRoleType),
default=None
)
_kdw_mski_cp_digital_signature_algorithm = Column(
'_kdw_mski_cp_digital_signature_algorithm',
sql.EnumType(enums.DigitalSignatureAlgorithm),
default=None
)
_kdw_mski_cp_cryptographic_algorithm = Column(
'_kdw_mski_cp_cryptographic_algorithm',
sql.EnumType(enums.CryptographicAlgorithm),
default=None
)
_kdw_mski_cp_random_iv = Column(
'_kdw_mski_cp_random_iv',
Boolean,
default=None
)
_kdw_mski_cp_iv_length = Column(
'_kdw_mski_cp_iv_length',
Integer,
default=None
)
_kdw_mski_cp_tag_length = Column(
'_kdw_mski_cp_tag_length',
Integer,
default=None
)
_kdw_mski_cp_fixed_field_length = Column(
'_kdw_mski_cp_fixed_field_length',
Integer,
default=None
)
_kdw_mski_cp_invocation_field_length = Column(
'_kdw_mski_cp_invocation_field_length',
Integer,
default=None
)
_kdw_mski_cp_counter_length = Column(
'_kdw_mski_cp_counter_length',
Integer,
default=None
)
_kdw_mski_cp_initial_counter_value = Column(
'_kdw_mski_cp_initial_counter_value',
Integer,
default=None
)
_kdw_mac_signature = Column(
'_kdw_mac_signature',
VARBINARY(1024),
default=None
)
_kdw_iv_counter_nonce = Column(
'_kdw_iv_counter_nonce',
VARBINARY(1024),
default=None
)
_kdw_encoding_option = Column(
'_kdw_encoding_option',
sql.EnumType(enums.EncodingOption),
default=None
)
__mapper_args__ = {
'polymorphic_identity': 'Key'
}
__table_args__ = {
'sqlite_autoincrement': True
}
@abstractmethod
def __init__(self, key_wrapping_data=None):
"""
Create a Key object.
Args:
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
Optional, defaults to None.
"""
super(Key, self).__init__()
self.cryptographic_algorithm = None
self.cryptographic_length = None
self.key_format_type = None
self.key_wrapping_data = key_wrapping_data
# All remaining attributes are not considered part of the public API
# and are subject to change.
self._cryptographic_parameters = list()
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._usage_limits = None
@property
def key_wrapping_data(self):
"""
Retrieve all of the relevant key wrapping data fields and return them
as a dictionary.
"""
key_wrapping_data = {}
encryption_key_info = {
'unique_identifier': self._kdw_eki_unique_identifier,
'cryptographic_parameters': {
'block_cipher_mode': self._kdw_eki_cp_block_cipher_mode,
'padding_method': self._kdw_eki_cp_padding_method,
'hashing_algorithm': self._kdw_eki_cp_hashing_algorithm,
'key_role_type': self._kdw_eki_cp_key_role_type,
'digital_signature_algorithm':
self._kdw_eki_cp_digital_signature_algorithm,
'cryptographic_algorithm':
self._kdw_eki_cp_cryptographic_algorithm,
'random_iv': self._kdw_eki_cp_random_iv,
'iv_length': self._kdw_eki_cp_iv_length,
'tag_length': self._kdw_eki_cp_tag_length,
'fixed_field_length': self._kdw_eki_cp_fixed_field_length,
'invocation_field_length':
self._kdw_eki_cp_invocation_field_length,
'counter_length': self._kdw_eki_cp_counter_length,
'initial_counter_value':
self._kdw_eki_cp_initial_counter_value
}
}
if not any(encryption_key_info['cryptographic_parameters'].values()):
encryption_key_info['cryptographic_parameters'] = {}
if not any(encryption_key_info.values()):
encryption_key_info = {}
mac_sign_key_info = {
'unique_identifier': self._kdw_mski_unique_identifier,
'cryptographic_parameters': {
'block_cipher_mode': self._kdw_mski_cp_block_cipher_mode,
'padding_method': self._kdw_mski_cp_padding_method,
'hashing_algorithm': self._kdw_mski_cp_hashing_algorithm,
'key_role_type': self._kdw_mski_cp_key_role_type,
'digital_signature_algorithm':
self._kdw_mski_cp_digital_signature_algorithm,
'cryptographic_algorithm':
self._kdw_mski_cp_cryptographic_algorithm,
'random_iv': self._kdw_mski_cp_random_iv,
'iv_length': self._kdw_mski_cp_iv_length,
'tag_length': self._kdw_mski_cp_tag_length,
'fixed_field_length': self._kdw_mski_cp_fixed_field_length,
'invocation_field_length':
self._kdw_mski_cp_invocation_field_length,
'counter_length': self._kdw_mski_cp_counter_length,
'initial_counter_value':
self._kdw_mski_cp_initial_counter_value
}
}
if not any(mac_sign_key_info['cryptographic_parameters'].values()):
mac_sign_key_info['cryptographic_parameters'] = {}
if not any(mac_sign_key_info.values()):
mac_sign_key_info = {}
key_wrapping_data['wrapping_method'] = self._kdw_wrapping_method
key_wrapping_data['encryption_key_information'] = encryption_key_info
key_wrapping_data['mac_signature_key_information'] = mac_sign_key_info
key_wrapping_data['mac_signature'] = self._kdw_mac_signature
key_wrapping_data['iv_counter_nonce'] = self._kdw_iv_counter_nonce
key_wrapping_data['encoding_option'] = self._kdw_encoding_option
if not any(key_wrapping_data.values()):
key_wrapping_data = {}
return key_wrapping_data
@key_wrapping_data.setter
def key_wrapping_data(self, value):
"""
Set the key wrapping data attributes using a dictionary.
"""
if value is None:
value = {}
elif not isinstance(value, dict):
raise TypeError("Key wrapping data must be a dictionary.")
self._kdw_wrapping_method = value.get('wrapping_method')
eki = value.get('encryption_key_information')
if eki is None:
eki = {}
self._kdw_eki_unique_identifier = eki.get('unique_identifier')
eki_cp = eki.get('cryptographic_parameters')
if eki_cp is None:
eki_cp = {}
self._kdw_eki_cp_block_cipher_mode = eki_cp.get('block_cipher_mode')
self._kdw_eki_cp_padding_method = eki_cp.get('padding_method')
self._kdw_eki_cp_hashing_algorithm = eki_cp.get('hashing_algorithm')
self._kdw_eki_cp_key_role_type = eki_cp.get('key_role_type')
self._kdw_eki_cp_digital_signature_algorithm = \
eki_cp.get('digital_signature_algorithm')
self._kdw_eki_cp_cryptographic_algorithm = \
eki_cp.get('cryptographic_algorithm')
self._kdw_eki_cp_random_iv = eki_cp.get('random_iv')
self._kdw_eki_cp_iv_length = eki_cp.get('iv_length')
self._kdw_eki_cp_tag_length = eki_cp.get('tag_length')
self._kdw_eki_cp_fixed_field_length = eki_cp.get('fixed_field_length')
self._kdw_eki_cp_invocation_field_length = \
eki_cp.get('invocation_field_length')
self._kdw_eki_cp_counter_length = eki_cp.get('counter_length')
self._kdw_eki_cp_initial_counter_value = \
eki_cp.get('initial_counter_value')
mski = value.get('mac_signature_key_information')
if mski is None:
mski = {}
self._kdw_mski_unique_identifier = mski.get('unique_identifier')
mski_cp = mski.get('cryptographic_parameters')
if mski_cp is None:
mski_cp = {}
self._kdw_mski_cp_block_cipher_mode = mski_cp.get('block_cipher_mode')
self._kdw_mski_cp_padding_method = mski_cp.get('padding_method')
self._kdw_mski_cp_hashing_algorithm = mski_cp.get('hashing_algorithm')
self._kdw_mski_cp_key_role_type = mski_cp.get('key_role_type')
self._kdw_mski_cp_digital_signature_algorithm = \
mski_cp.get('digital_signature_algorithm')
self._kdw_mski_cp_cryptographic_algorithm = \
mski_cp.get('cryptographic_algorithm')
self._kdw_mski_cp_random_iv = mski_cp.get('random_iv')
self._kdw_mski_cp_iv_length = mski_cp.get('iv_length')
self._kdw_mski_cp_tag_length = mski_cp.get('tag_length')
self._kdw_mski_cp_fixed_field_length = \
mski_cp.get('fixed_field_length')
self._kdw_mski_cp_invocation_field_length = \
mski_cp.get('invocation_field_length')
self._kdw_mski_cp_counter_length = mski_cp.get('counter_length')
self._kdw_mski_cp_initial_counter_value = \
mski_cp.get('initial_counter_value')
self._kdw_mac_signature = value.get('mac_signature')
self._kdw_iv_counter_nonce = value.get('iv_counter_nonce')
self._kdw_encoding_option = value.get('encoding_option')
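    # Illustrative key_wrapping_data value (the key names below are exactly
    # the keys read by the setter above; the identifier and algorithm choices
    # are made up for the example):
    #
    #     {
    #         'wrapping_method': enums.WrappingMethod.ENCRYPT,
    #         'encryption_key_information': {
    #             'unique_identifier': '42',
    #             'cryptographic_parameters': {
    #                 'block_cipher_mode': enums.BlockCipherMode.NIST_KEY_WRAP
    #             }
    #         },
    #         'encoding_option': enums.EncodingOption.NO_ENCODING
    #     }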
class SymmetricKey(Key):
"""
The SymmetricKey class of the simplified KMIP object hierarchy.
A SymmetricKey is a core KMIP object that is the subject of key
management operations. For more information, see Section 2.2 of the KMIP
1.1 specification.
Attributes:
cryptographic_algorithm: The type of algorithm for the SymmetricKey.
cryptographic_length: The length in bits of the SymmetricKey value.
value: The bytes of the SymmetricKey.
key_format_type: The format of the key value.
cryptographic_usage_masks: The list of usage mask flags for
SymmetricKey application.
names: The string names of the SymmetricKey.
key_wrapping_data: A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
"""
__tablename__ = 'symmetric_keys'
unique_identifier = Column('uid', Integer,
ForeignKey('keys.uid'),
primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'SymmetricKey'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, algorithm, length, value, masks=None,
name='Symmetric Key', key_wrapping_data=None):
"""
Create a SymmetricKey.
Args:
algorithm(CryptographicAlgorithm): An enumeration identifying the
type of algorithm for the key.
length(int): The length in bits of the key.
value(bytes): The bytes representing the key.
masks(list): A list of CryptographicUsageMask enumerations defining
how the key will be used. Optional, defaults to None.
name(string): The string name of the key. Optional, defaults to
'Symmetric Key'.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
Optional, defaults to None.
"""
super(SymmetricKey, self).__init__(
key_wrapping_data=key_wrapping_data
)
self._object_type = enums.ObjectType.SYMMETRIC_KEY
self.key_format_type = enums.KeyFormatType.RAW
self.value = value
self.cryptographic_algorithm = algorithm
self.cryptographic_length = length
self.names = [name]
if masks:
self.cryptographic_usage_masks.extend(masks)
# All remaining attributes are not considered part of the public API
# and are subject to change.
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._process_start_date = None
self._protect_stop_date = None
self.validate()
def validate(self):
"""
Verify that the contents of the SymmetricKey object are valid.
Raises:
TypeError: if the types of any SymmetricKey attributes are invalid
ValueError: if the key length and key value length do not match
"""
if not isinstance(self.value, bytes):
raise TypeError("key value must be bytes")
elif not isinstance(self.cryptographic_algorithm,
enums.CryptographicAlgorithm):
raise TypeError("key algorithm must be a CryptographicAlgorithm "
"enumeration")
elif not isinstance(self.cryptographic_length, six.integer_types):
raise TypeError("key length must be an integer")
mask_count = len(self.cryptographic_usage_masks)
for i in range(mask_count):
mask = self.cryptographic_usage_masks[i]
if not isinstance(mask, enums.CryptographicUsageMask):
position = "({0} in list)".format(i)
raise TypeError(
"key mask {0} must be a CryptographicUsageMask "
"enumeration".format(position))
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("key name {0} must be a string".format(
position))
if not self.key_wrapping_data:
if (len(self.value) * 8) != self.cryptographic_length:
msg = "key length ({0}) not equal to key value length ({1})"
msg = msg.format(
self.cryptographic_length,
len(self.value) * 8
)
raise ValueError(msg)
def __repr__(self):
algorithm = "algorithm={0}".format(self.cryptographic_algorithm)
length = "length={0}".format(self.cryptographic_length)
value = "value={0}".format(binascii.hexlify(self.value))
key_wrapping_data = "key_wrapping_data={0}".format(
self.key_wrapping_data
)
return "SymmetricKey({0}, {1}, {2}, {3})".format(
algorithm,
length,
value,
key_wrapping_data
)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, SymmetricKey):
if self.value != other.value:
return False
elif self.cryptographic_algorithm != other.cryptographic_algorithm:
return False
elif self.cryptographic_length != other.cryptographic_length:
return False
elif self.key_wrapping_data != other.key_wrapping_data:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, SymmetricKey):
return not (self == other)
else:
return NotImplemented
event.listen(SymmetricKey._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
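# Minimal usage sketch (the 16-byte value is a made-up example key):
#
#     key = SymmetricKey(
#         enums.CryptographicAlgorithm.AES,
#         128,
#         (b'\x00\x01\x02\x03\x04\x05\x06\x07'
#          b'\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'),
#         masks=[enums.CryptographicUsageMask.ENCRYPT,
#                enums.CryptographicUsageMask.DECRYPT]
#     )
#
# validate() runs inside the constructor and raises ValueError if the key
# value is not exactly 128 bits long (unless key wrapping data is supplied).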
class PublicKey(Key):
"""
The PublicKey class of the simplified KMIP object hierarchy.
A PublicKey is a core KMIP object that is the subject of key management
operations. For more information, see Section 2.2 of the KMIP 1.1
specification.
Attributes:
cryptographic_algorithm: The type of algorithm for the PublicKey.
cryptographic_length: The length in bits of the PublicKey.
value: The bytes of the PublicKey.
key_format_type: The format of the key value.
cryptographic_usage_masks: The list of usage mask flags for PublicKey
application.
names: The list of string names of the PublicKey.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
"""
__tablename__ = 'public_keys'
unique_identifier = Column('uid', Integer,
ForeignKey('keys.uid'),
primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'PublicKey'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, algorithm, length, value,
format_type=enums.KeyFormatType.X_509, masks=None,
name='Public Key', key_wrapping_data=None):
"""
Create a PublicKey.
Args:
algorithm(CryptographicAlgorithm): An enumeration identifying the
type of algorithm for the key.
length(int): The length in bits of the key.
value(bytes): The bytes representing the key.
format_type(KeyFormatType): An enumeration defining the format of
the key value. Optional, defaults to enums.KeyFormatType.X_509.
masks(list): A list of CryptographicUsageMask enumerations
defining how the key will be used. Optional, defaults to None.
name(string): The string name of the key. Optional, defaults to
'Public Key'.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
Optional, defaults to None.
"""
super(PublicKey, self).__init__(
key_wrapping_data=key_wrapping_data
)
self._object_type = enums.ObjectType.PUBLIC_KEY
self._valid_formats = [
enums.KeyFormatType.RAW,
enums.KeyFormatType.X_509,
enums.KeyFormatType.PKCS_1]
self.value = value
self.cryptographic_algorithm = algorithm
self.cryptographic_length = length
self.key_format_type = format_type
self.names = [name]
if masks:
self.cryptographic_usage_masks = masks
# All remaining attributes are not considered part of the public API
# and are subject to change.
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._cryptographic_domain_parameters = list()
self.validate()
def validate(self):
"""
Verify that the contents of the PublicKey object are valid.
Raises:
TypeError: if the types of any PublicKey attributes are invalid.
"""
if not isinstance(self.value, bytes):
raise TypeError("key value must be bytes")
elif not isinstance(self.cryptographic_algorithm,
enums.CryptographicAlgorithm):
raise TypeError("key algorithm must be a CryptographicAlgorithm "
"enumeration")
elif not isinstance(self.cryptographic_length, six.integer_types):
raise TypeError("key length must be an integer")
elif not isinstance(self.key_format_type, enums.KeyFormatType):
raise TypeError("key format type must be a KeyFormatType "
"enumeration")
elif self.key_format_type not in self._valid_formats:
raise ValueError("key format type must be one of {0}".format(
self._valid_formats))
# TODO (peter-hamilton) Verify that the key bytes match the key format
mask_count = len(self.cryptographic_usage_masks)
for i in range(mask_count):
mask = self.cryptographic_usage_masks[i]
if not isinstance(mask, enums.CryptographicUsageMask):
position = "({0} in list)".format(i)
raise TypeError(
"key mask {0} must be a CryptographicUsageMask "
"enumeration".format(position))
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("key name {0} must be a string".format(
position))
def __repr__(self):
algorithm = "algorithm={0}".format(self.cryptographic_algorithm)
length = "length={0}".format(self.cryptographic_length)
value = "value={0}".format(binascii.hexlify(self.value))
format_type = "format_type={0}".format(self.key_format_type)
key_wrapping_data = "key_wrapping_data={0}".format(
self.key_wrapping_data
)
return "PublicKey({0}, {1}, {2}, {3}, {4})".format(
algorithm, length, value, format_type, key_wrapping_data)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, PublicKey):
if self.value != other.value:
return False
elif self.key_format_type != other.key_format_type:
return False
elif self.cryptographic_algorithm != other.cryptographic_algorithm:
return False
elif self.cryptographic_length != other.cryptographic_length:
return False
elif self.key_wrapping_data != other.key_wrapping_data:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, PublicKey):
return not (self == other)
else:
return NotImplemented
event.listen(PublicKey._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
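# Usage sketch (`der_bytes` stands in for real DER-encoded RSA key material
# and is not defined here):
#
#     public_key = PublicKey(
#         enums.CryptographicAlgorithm.RSA,
#         2048,
#         der_bytes,
#         format_type=enums.KeyFormatType.X_509
#     )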
class PrivateKey(Key):
"""
The PrivateKey class of the simplified KMIP object hierarchy.
A PrivateKey is a core KMIP object that is the subject of key management
operations. For more information, see Section 2.2 of the KMIP 1.1
specification.
Attributes:
cryptographic_algorithm: The type of algorithm for the PrivateKey.
cryptographic_length: The length in bits of the PrivateKey.
value: The bytes of the PrivateKey.
key_format_type: The format of the key value.
        cryptographic_usage_masks: The list of usage mask flags for PrivateKey
            application.
        names: The list of string names of the PrivateKey.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
"""
__tablename__ = 'private_keys'
unique_identifier = Column('uid', Integer,
ForeignKey('keys.uid'),
primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'PrivateKey'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, algorithm, length, value, format_type, masks=None,
name='Private Key', key_wrapping_data=None):
"""
Create a PrivateKey.
Args:
algorithm(CryptographicAlgorithm): An enumeration identifying the
type of algorithm for the key.
length(int): The length in bits of the key.
value(bytes): The bytes representing the key.
format_type(KeyFormatType): An enumeration defining the format of
the key value.
            masks(list): A list of CryptographicUsageMask enumerations
                defining how the key will be used. Optional, defaults to None.
            name(string): The string name of the key. Optional, defaults to
                'Private Key'.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
Optional, defaults to None.
"""
super(PrivateKey, self).__init__(
key_wrapping_data=key_wrapping_data
)
self._object_type = enums.ObjectType.PRIVATE_KEY
self._valid_formats = [
enums.KeyFormatType.RAW,
enums.KeyFormatType.PKCS_1,
enums.KeyFormatType.PKCS_8]
self.value = value
self.cryptographic_algorithm = algorithm
self.cryptographic_length = length
self.key_format_type = format_type
self.names = [name]
if masks:
self.cryptographic_usage_masks = masks
# All remaining attributes are not considered part of the public API
# and are subject to change.
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._cryptographic_domain_parameters = list()
self.validate()
def validate(self):
"""
Verify that the contents of the PrivateKey object are valid.
Raises:
TypeError: if the types of any PrivateKey attributes are invalid.
"""
if not isinstance(self.value, bytes):
raise TypeError("key value must be bytes")
elif not isinstance(self.cryptographic_algorithm,
enums.CryptographicAlgorithm):
raise TypeError("key algorithm must be a CryptographicAlgorithm "
"enumeration")
elif not isinstance(self.cryptographic_length, six.integer_types):
raise TypeError("key length must be an integer")
elif not isinstance(self.key_format_type, enums.KeyFormatType):
raise TypeError("key format type must be a KeyFormatType "
"enumeration")
elif self.key_format_type not in self._valid_formats:
raise ValueError("key format type must be one of {0}".format(
self._valid_formats))
# TODO (peter-hamilton) Verify that the key bytes match the key format
mask_count = len(self.cryptographic_usage_masks)
for i in range(mask_count):
mask = self.cryptographic_usage_masks[i]
if not isinstance(mask, enums.CryptographicUsageMask):
position = "({0} in list)".format(i)
raise TypeError(
"key mask {0} must be a CryptographicUsageMask "
"enumeration".format(position))
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("key name {0} must be a string".format(
position))
def __repr__(self):
algorithm = "algorithm={0}".format(self.cryptographic_algorithm)
length = "length={0}".format(self.cryptographic_length)
value = "value={0}".format(binascii.hexlify(self.value))
format_type = "format_type={0}".format(self.key_format_type)
key_wrapping_data = "key_wrapping_data={0}".format(
self.key_wrapping_data
)
return "PrivateKey({0}, {1}, {2}, {3}, {4})".format(
algorithm, length, value, format_type, key_wrapping_data)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, PrivateKey):
if self.value != other.value:
return False
elif self.key_format_type != other.key_format_type:
return False
elif self.cryptographic_algorithm != other.cryptographic_algorithm:
return False
elif self.cryptographic_length != other.cryptographic_length:
return False
elif self.key_wrapping_data != other.key_wrapping_data:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, PrivateKey):
return not (self == other)
else:
return NotImplemented
event.listen(PrivateKey._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
class SplitKey(Key):
"""
"""
__mapper_args__ = {"polymorphic_identity": "SplitKey"}
__table_args__ = {"sqlite_autoincrement": True}
__tablename__ = "split_keys"
unique_identifier = sqlalchemy.Column(
"uid",
sqlalchemy.Integer,
sqlalchemy.ForeignKey("keys.uid"),
primary_key=True
)
# Split Key object fields
_split_key_parts = sqlalchemy.Column(
"_split_key_parts",
sqlalchemy.Integer,
default=None
)
_key_part_identifier = sqlalchemy.Column(
"_key_part_identifier",
sqlalchemy.Integer,
default=None
)
_split_key_threshold = sqlalchemy.Column(
"_split_key_threshold",
sqlalchemy.Integer,
default=None
)
_split_key_method = sqlalchemy.Column(
"_split_key_method",
sql.EnumType(enums.SplitKeyMethod),
default=None
)
_prime_field_size = sqlalchemy.Column(
"_prime_field_size",
sqlalchemy.BigInteger,
default=None
)
def __init__(self,
cryptographic_algorithm=None,
cryptographic_length=None,
key_value=None,
cryptographic_usage_masks=None,
name="Split Key",
key_format_type=enums.KeyFormatType.RAW,
key_wrapping_data=None,
split_key_parts=None,
key_part_identifier=None,
split_key_threshold=None,
split_key_method=None,
prime_field_size=None):
"""
Create a SplitKey.
Args:
cryptographic_algorithm(enum): A CryptographicAlgorithm enumeration
identifying the type of algorithm for the split key. Required.
cryptographic_length(int): The length in bits of the split key.
Required.
key_value(bytes): The bytes representing the split key. Required.
cryptographic_usage_masks(list): A list of CryptographicUsageMask
enumerations defining how the split key will be used. Optional,
defaults to None.
name(string): The string name of the split key. Optional, defaults
to "Split Key".
key_format_type (enum): A KeyFormatType enumeration specifying the
format of the split key. Optional, defaults to Raw.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the split key has been wrapped.
Optional, defaults to None.
split_key_parts (int): An integer specifying the total number of
parts of the split key. Required.
key_part_identifier (int): An integer specifying which key part
of the split key this key object represents. Required.
split_key_threshold (int): An integer specifying the minimum
number of key parts required to reconstruct the split key.
Required.
split_key_method (enum): A SplitKeyMethod enumeration specifying
how the key was split. Required.
prime_field_size (int): A big integer specifying the prime field
size used for the Polynomial Sharing Prime Field split key
method. Optional, defaults to None.
"""
super(SplitKey, self).__init__(key_wrapping_data=key_wrapping_data)
self._object_type = enums.ObjectType.SPLIT_KEY
self.key_format_type = key_format_type
self.value = key_value
self.cryptographic_algorithm = cryptographic_algorithm
self.cryptographic_length = cryptographic_length
self.names = [name]
if cryptographic_usage_masks:
self.cryptographic_usage_masks.extend(cryptographic_usage_masks)
self.split_key_parts = split_key_parts
self.key_part_identifier = key_part_identifier
self.split_key_threshold = split_key_threshold
self.split_key_method = split_key_method
self.prime_field_size = prime_field_size
@property
def split_key_parts(self):
return self._split_key_parts
@split_key_parts.setter
def split_key_parts(self, value):
if (value is None) or (isinstance(value, six.integer_types)):
self._split_key_parts = value
else:
raise TypeError("The split key parts must be an integer.")
@property
def key_part_identifier(self):
return self._key_part_identifier
@key_part_identifier.setter
def key_part_identifier(self, value):
if (value is None) or (isinstance(value, six.integer_types)):
self._key_part_identifier = value
else:
raise TypeError("The key part identifier must be an integer.")
@property
def split_key_threshold(self):
return self._split_key_threshold
@split_key_threshold.setter
def split_key_threshold(self, value):
if (value is None) or (isinstance(value, six.integer_types)):
self._split_key_threshold = value
else:
raise TypeError("The split key threshold must be an integer.")
@property
def split_key_method(self):
return self._split_key_method
@split_key_method.setter
def split_key_method(self, value):
if (value is None) or (isinstance(value, enums.SplitKeyMethod)):
self._split_key_method = value
else:
raise TypeError(
"The split key method must be a SplitKeyMethod enumeration."
)
@property
def prime_field_size(self):
return self._prime_field_size
@prime_field_size.setter
def prime_field_size(self, value):
if (value is None) or (isinstance(value, six.integer_types)):
self._prime_field_size = value
else:
raise TypeError("The prime field size must be an integer.")
def __repr__(self):
cryptographic_algorithm = "cryptographic_algorithm={0}".format(
self.cryptographic_algorithm
)
cryptographic_length = "cryptographic_length={0}".format(
self.cryptographic_length
)
key_value = "key_value={0}".format(binascii.hexlify(self.value))
key_format_type = "key_format_type={0}".format(self.key_format_type)
key_wrapping_data = "key_wrapping_data={0}".format(
self.key_wrapping_data
)
cryptographic_usage_masks = "cryptographic_usage_masks={0}".format(
self.cryptographic_usage_masks
)
names = "name={0}".format(self.names)
split_key_parts = "split_key_parts={0}".format(self.split_key_parts)
key_part_identifier = "key_part_identifier={0}".format(
self.key_part_identifier
)
split_key_threshold = "split_key_threshold={0}".format(
self.split_key_threshold
)
split_key_method = "split_key_method={0}".format(self.split_key_method)
prime_field_size = "prime_field_size={0}".format(self.prime_field_size)
return "SplitKey({0})".format(
", ".join(
[
cryptographic_algorithm,
cryptographic_length,
key_value,
key_format_type,
key_wrapping_data,
cryptographic_usage_masks,
names,
split_key_parts,
key_part_identifier,
split_key_threshold,
split_key_method,
prime_field_size
]
)
)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, SplitKey):
if self.value != other.value:
return False
elif self.key_format_type != other.key_format_type:
return False
elif self.cryptographic_algorithm != other.cryptographic_algorithm:
return False
elif self.cryptographic_length != other.cryptographic_length:
return False
elif self.key_wrapping_data != other.key_wrapping_data:
return False
elif self.cryptographic_usage_masks != \
other.cryptographic_usage_masks:
return False
elif self.names != other.names:
return False
elif self.split_key_parts != other.split_key_parts:
return False
elif self.key_part_identifier != other.key_part_identifier:
return False
elif self.split_key_threshold != other.split_key_threshold:
return False
elif self.split_key_method != other.split_key_method:
return False
elif self.prime_field_size != other.prime_field_size:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, SplitKey):
return not (self == other)
else:
return NotImplemented
event.listen(
SplitKey._names,
"append",
sql.attribute_append_factory("name_index"),
retval=False
)
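# Illustrative parameter choices (made up for the example): a key split into
# five XOR parts would use split_key_parts=5, split_key_threshold=5 (XOR
# needs every part to reconstruct the key) and
# split_key_method=enums.SplitKeyMethod.XOR, with key_part_identifier set to
# this part's index (1 through 5).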
class Certificate(CryptographicObject):
"""
The Certificate class of the simplified KMIP object hierarchy.
A Certificate is a core KMIP object that is the subject of key management
operations. For more information, see Section 2.2 of the KMIP 1.1
specification.
Attributes:
certificate_type: The type of the Certificate.
value: The bytes of the Certificate.
cryptographic_usage_masks: The list of usage mask flags for
Certificate application.
names: The list of string names of the Certificate.
"""
__tablename__ = 'certificates'
unique_identifier = Column('uid', Integer,
ForeignKey('crypto_objects.uid'),
primary_key=True)
certificate_type = Column(
'certificate_type', sql.EnumType(enums.CertificateType))
__mapper_args__ = {
'polymorphic_identity': 'Certificate'
}
__table_args__ = {
'sqlite_autoincrement': True
}
@abstractmethod
def __init__(self, certificate_type, value, masks=None,
name='Certificate'):
"""
Create a Certificate.
Args:
certificate_type(CertificateType): An enumeration defining the
type of the certificate.
value(bytes): The bytes representing the certificate.
masks(list): A list of CryptographicUsageMask enumerations
defining how the certificate will be used.
name(string): The string name of the certificate.
"""
super(Certificate, self).__init__()
self._object_type = enums.ObjectType.CERTIFICATE
self.value = value
self.certificate_type = certificate_type
self.names = [name]
if masks:
self.cryptographic_usage_masks = masks
# All remaining attributes are not considered part of the public API
# and are subject to change.
self._cryptographic_algorithm = None
self._cryptographic_length = None
self._certificate_length = None
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._cryptographic_parameters = list()
self._digital_signature_algorithm = list()
self.validate()
def validate(self):
"""
Verify that the contents of the Certificate object are valid.
Raises:
TypeError: if the types of any Certificate attributes are invalid.
"""
if not isinstance(self.value, bytes):
raise TypeError("certificate value must be bytes")
elif not isinstance(self.certificate_type,
enums.CertificateType):
raise TypeError("certificate type must be a CertificateType "
"enumeration")
mask_count = len(self.cryptographic_usage_masks)
for i in range(mask_count):
mask = self.cryptographic_usage_masks[i]
if not isinstance(mask, enums.CryptographicUsageMask):
position = "({0} in list)".format(i)
raise TypeError(
"certificate mask {0} must be a CryptographicUsageMask "
"enumeration".format(position))
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("certificate name {0} must be a string".format(
position))
def __str__(self):
return str(binascii.hexlify(self.value))
class X509Certificate(Certificate):
"""
The X509Certificate class of the simplified KMIP object hierarchy.
An X509Certificate is a core KMIP object that is the subject of key
management operations. For more information, see Section 2.2 of the KMIP
1.1 specification.
Attributes:
value: The bytes of the Certificate.
cryptographic_usage_masks: The list of usage mask flags for
Certificate application.
names: The list of string names of the Certificate.
"""
__tablename__ = 'x509_certificates'
unique_identifier = Column('uid', Integer,
ForeignKey('certificates.uid'),
primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'X509Certificate'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, value, masks=None, name='X.509 Certificate'):
"""
Create an X509Certificate.
Args:
value(bytes): The bytes representing the certificate.
masks(list): A list of CryptographicUsageMask enumerations
defining how the certificate will be used.
name(string): The string name of the certificate.
"""
super(X509Certificate, self).__init__(
enums.CertificateType.X_509, value, masks, name)
# All remaining attributes are not considered part of the public API
# and are subject to change.
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._x509_certificate_identifier = None
self._x509_certificate_subject = None
self._x509_certificate_issuer = None
self.validate()
def __repr__(self):
certificate_type = "certificate_type={0}".format(self.certificate_type)
value = "value={0}".format(binascii.hexlify(self.value))
return "X509Certificate({0}, {1})".format(certificate_type, value)
def __eq__(self, other):
if isinstance(other, X509Certificate):
if self.value != other.value:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, X509Certificate):
return not (self == other)
else:
return NotImplemented
event.listen(X509Certificate._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
class SecretData(CryptographicObject):
"""
The SecretData class of the simplified KMIP object hierarchy.
SecretData is one of several CryptographicObjects and is one of the core
KMIP objects that are the subject of key management operations. For more
information, see Section 2.2 of the KMIP 1.1 specification.
Attributes:
cryptographic_usage_masks: A list of usage mask enumerations
describing how the CryptographicObject will be used.
data_type: The type of the secret value.
"""
__tablename__ = 'secret_data_objects'
unique_identifier = Column('uid', Integer,
ForeignKey('crypto_objects.uid'),
primary_key=True)
data_type = Column('data_type', sql.EnumType(enums.SecretDataType))
__mapper_args__ = {
'polymorphic_identity': 'SecretData'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, value, data_type, masks=None, name='Secret Data'):
"""
Create a SecretData object.
Args:
value(bytes): The bytes representing secret data.
data_type(SecretDataType): An enumeration defining the type of the
secret value.
masks(list): A list of CryptographicUsageMask enumerations
defining how the key will be used.
name(string): The string name of the key.
"""
super(SecretData, self).__init__()
self._object_type = enums.ObjectType.SECRET_DATA
self.value = value
self.data_type = data_type
self.names = [name]
if masks:
self.cryptographic_usage_masks = masks
# All remaining attributes are not considered part of the public API
# and are subject to change.
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self.validate()
def validate(self):
"""
Verify that the contents of the SecretData object are valid.
Raises:
TypeError: if the types of any SecretData attributes are invalid.
"""
if not isinstance(self.value, bytes):
raise TypeError("secret value must be bytes")
elif not isinstance(self.data_type, enums.SecretDataType):
raise TypeError("secret data type must be a SecretDataType "
"enumeration")
mask_count = len(self.cryptographic_usage_masks)
for i in range(mask_count):
mask = self.cryptographic_usage_masks[i]
if not isinstance(mask, enums.CryptographicUsageMask):
position = "({0} in list)".format(i)
raise TypeError(
"secret data mask {0} must be a CryptographicUsageMask "
"enumeration".format(position))
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("secret data name {0} must be a string".format(
position))
def __repr__(self):
value = "value={0}".format(binascii.hexlify(self.value))
data_type = "data_type={0}".format(self.data_type)
return "SecretData({0}, {1})".format(value, data_type)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, SecretData):
if self.value != other.value:
return False
elif self.data_type != other.data_type:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, SecretData):
return not (self == other)
else:
return NotImplemented
event.listen(SecretData._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
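# Example (hypothetical secret value):
#
#     secret = SecretData(b'hunter2', enums.SecretDataType.PASSWORD)
#
# validate() only checks the types of the value, data type, masks, and names;
# it does not inspect the secret content itself.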
class OpaqueObject(ManagedObject):
"""
The OpaqueObject class of the simplified KMIP object hierarchy.
OpaqueObject is one of several ManagedObjects and is one of the core KMIP
objects that are the subject of key management operations. For more
information, see Section 2.2 of the KMIP 1.1 specification.
Attributes:
opaque_type: The type of the opaque value.
"""
__tablename__ = 'opaque_objects'
unique_identifier = Column('uid', Integer,
ForeignKey('managed_objects.uid'),
primary_key=True)
opaque_type = Column('opaque_type', sql.EnumType(enums.OpaqueDataType))
__mapper_args__ = {
'polymorphic_identity': 'OpaqueData'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, value, opaque_type, name='Opaque Object'):
"""
Create a OpaqueObject.
Args:
value(bytes): The bytes representing opaque data.
opaque_type(OpaqueDataType): An enumeration defining the type of
the opaque value.
name(string): The string name of the opaque object.
"""
super(OpaqueObject, self).__init__()
self._object_type = enums.ObjectType.OPAQUE_DATA
self.value = value
self.opaque_type = opaque_type
self.names.append(name)
# All remaining attributes are not considered part of the public API
# and are subject to change.
self._digest = None
self._revocation_reason = None
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._destroy_date = None
self._compromise_occurrence_date = None
self._compromise_date = None
self.validate()
def validate(self):
"""
Verify that the contents of the OpaqueObject are valid.
Raises:
TypeError: if the types of any OpaqueObject attributes are invalid.
"""
if not isinstance(self.value, bytes):
raise TypeError("opaque value must be bytes")
elif not isinstance(self.opaque_type, enums.OpaqueDataType):
raise TypeError("opaque data type must be an OpaqueDataType "
"enumeration")
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("opaque data name {0} must be a string".format(
position))
def __repr__(self):
value = "value={0}".format(binascii.hexlify(self.value))
opaque_type = "opaque_type={0}".format(self.opaque_type)
return "OpaqueObject({0}, {1})".format(value, opaque_type)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, OpaqueObject):
if self.value != other.value:
return False
elif self.opaque_type != other.opaque_type:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, OpaqueObject):
return not (self == other)
else:
return NotImplemented
event.listen(OpaqueObject._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
class ApplicationSpecificInformation(sql.Base):
__tablename__ = "app_specific_info"
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
_application_namespace = sqlalchemy.Column(
"application_namespace",
sqlalchemy.String
)
_application_data = sqlalchemy.Column(
"application_data",
sqlalchemy.String
)
managed_objects = sqlalchemy.orm.relationship(
"ManagedObject",
secondary=app_specific_info_map,
back_populates="app_specific_info"
)
def __init__(self,
application_namespace=None,
application_data=None):
"""
Create an ApplicationSpecificInformation attribute.
Args:
application_namespace (str): A string specifying the application
namespace. Required.
application_data (str): A string specifying the application data.
Required.
"""
super(ApplicationSpecificInformation, self).__init__()
self.application_namespace = application_namespace
self.application_data = application_data
@property
def application_namespace(self):
return self._application_namespace
@application_namespace.setter
def application_namespace(self, value):
if (value is None) or (isinstance(value, six.string_types)):
self._application_namespace = value
else:
raise TypeError("The application namespace must be a string.")
@property
def application_data(self):
return self._application_data
@application_data.setter
def application_data(self, value):
if (value is None) or (isinstance(value, six.string_types)):
self._application_data = value
else:
raise TypeError("The application data must be a string.")
def __repr__(self):
application_namespace = "application_namespace='{}'".format(
self.application_namespace
)
application_data = "application_data='{}'".format(
self.application_data
)
return "ApplicationSpecificInformation({})".format(
", ".join(
[
application_namespace,
application_data
]
)
)
def __str__(self):
return str(
{
"application_namespace": self.application_namespace,
"application_data": self.application_data
}
)
def __eq__(self, other):
if isinstance(other, ApplicationSpecificInformation):
if self.application_namespace != other.application_namespace:
return False
elif self.application_data != other.application_data:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, ApplicationSpecificInformation):
return not (self == other)
else:
return NotImplemented
class ObjectGroup(sql.Base):
__tablename__ = "object_groups"
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
_object_group = sqlalchemy.Column(
"object_group",
sqlalchemy.String,
nullable=False
)
managed_objects = sqlalchemy.orm.relationship(
"ManagedObject",
secondary=object_group_map,
back_populates="object_groups"
)
def __init__(self, object_group=None):
"""
Create an ObjectGroup attribute.
Args:
object_group (str): A string specifying the object group. Required.
"""
super(ObjectGroup, self).__init__()
self.object_group = object_group
@property
def object_group(self):
return self._object_group
@object_group.setter
def object_group(self, value):
if (value is None) or (isinstance(value, six.string_types)):
self._object_group = value
else:
raise TypeError("The object group must be a string.")
def __repr__(self):
object_group = "object_group='{}'".format(self.object_group)
return "ObjectGroup({})".format(object_group)
def __str__(self):
return str({"object_group": self.object_group})
def __eq__(self, other):
if isinstance(other, ObjectGroup):
if self.object_group != other.object_group:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, ObjectGroup):
return not (self == other)
else:
return NotImplemented
| apache-2.0 | 5,590,796,173,671,300,000 | 34.45098 | 79 | 0.593939 | false | 4.302605 | false | false | false |
rackerlabs/marconi | marconi/queues/transport/wsgi/v1_0/homedoc.py | 1 | 4656 | # Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import json
# NOTE(kgriffs): http://tools.ietf.org/html/draft-nottingham-json-home-03
JSON_HOME = {
'resources': {
#------------------------------------------------------------------
# Queues
#------------------------------------------------------------------
'rel/queues': {
'href-template': '/v1/queues{?marker,limit,detailed}',
'href-vars': {
'marker': 'param/marker',
'limit': 'param/queue_limit',
'detailed': 'param/detailed',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/queue': {
'href-template': '/v1/queues/{queue_name}',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['GET', 'HEAD', 'PUT', 'DELETE'],
'formats': {
'application/json': {},
},
},
},
'rel/queue-metadata': {
'href-template': '/v1/queues/{queue_name}/metadata',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['GET', 'PUT'],
'formats': {
'application/json': {},
},
},
},
'rel/queue-stats': {
'href-template': '/v1/queues/{queue_name}/stats',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
#------------------------------------------------------------------
# Messages
#------------------------------------------------------------------
'rel/messages': {
'href-template': ('/v1/queues/{queue_name}/messages'
'{?marker,limit,echo,include_claimed}'),
'href-vars': {
'queue_name': 'param/queue_name',
'marker': 'param/marker',
'limit': 'param/messages_limit',
'echo': 'param/echo',
'include_claimed': 'param/include_claimed',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/post-messages': {
'href-template': '/v1/queues/{queue_name}/messages',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['POST'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json'],
},
},
#------------------------------------------------------------------
# Claims
#------------------------------------------------------------------
'rel/claim': {
'href-template': '/v1/queues/{queue_name}/claims{?limit}',
'href-vars': {
'queue_name': 'param/queue_name',
'limit': 'param/claim_limit',
},
'hints': {
'allow': ['POST'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json']
},
},
}
}
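# Note (added): clients consume this document per the json-home draft cited
# above; e.g. the 'rel/queue' resource is reached by expanding its
# href-template with the listed href-vars, so queue_name='fizbit' (an assumed
# value) yields '/v1/queues/fizbit'.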
class Resource(object):
def __init__(self):
document = json.dumps(JSON_HOME, ensure_ascii=False, indent=4)
self.document_utf8 = document.encode('utf-8')
def on_get(self, req, resp, project_id):
resp.data = self.document_utf8
resp.content_type = 'application/json-home'
resp.cache_control = ['max-age=86400']
# status defaults to 200
| apache-2.0 | 9,216,185,514,893,187,000 | 31.788732 | 79 | 0.391323 | false | 4.656 | false | false | false |
dex4er/django-pyc | django_pyc/management/commands/clearpyc.py | 1 | 2097 | import argparse
import os
import re
import sys
from django.core.management import base
class Command(base.BaseCommand):
help = \
"""
Clears .pyc files from the project.
"""
pattern = r'^.+\.pyc$'
def add_arguments(self, parser):
parser.add_argument(
'--noinput', dest='noinput', action='store_true', default=False,
help="Do NOT prompt the user for input of any kind."
)
parser.add_argument(
'-f', '--force', dest='force', action='store_true', default=False,
help="Force the removing files without user interaction."
)
parser.add_argument(
'-p', '--with-pythonpath', dest='with_pythonpath', action='store_true', default=False,
help="Remove also PYTHONPATH libraries."
)
parser.add_argument(
'path', nargs=argparse.REMAINDER,
help="Directories with libraries"
)
def handle(self, *args, **options):
dirs = options['path'] or sys.path[:1]
if options['with_pythonpath']:
dirs += sys.path[1:]
for d in dirs:
d = d or '.'
if os.path.isdir(d) and os.access(d, os.W_OK):
for dirname, _, filenames in os.walk(d):
for filename in filenames:
fullname = os.path.join(dirname, filename)
if re.search(self.pattern, fullname):
if not options['force'] and not options['noinput']:
confirm_action = input(
"Do you want to delete '%s'? [y/N] " % fullname)
if confirm_action != 'y':
continue
os.remove(fullname)
if int(options['verbosity']) >= 2:
self.stdout.write("Removed %s" % fullname)
else:
if int(options['verbosity']) >= 2:
self.stdout.write("Skipped %s" % d)
| lgpl-3.0 | 5,796,214,227,565,749,000 | 36.446429 | 98 | 0.48784 | false | 4.558696 | false | false | false |
daite/textparser | analyze_japanese.py | 1 | 5424 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2015 daite
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from bs4 import BeautifulSoup as BS
from urlparse import urljoin
import requests
import argparse
import setting
import codecs
import os
class JapTextParser:
def __init__(self, mode='editorial'):
'''
:: init function of basic class
'''
self.mode = mode
self.text_encoding = setting.text_encoding
self.output_file = setting.output_file
self.dedupe_output_file = setting.dedupe_output_file
def analyze_japanese_text(self, search_filter=setting.word_filter):
'''
:: analyze japanese text when given search_filter
:: need to add error handling
'''
for sentence in self.get_text_from_url():
url = setting.app_url(setting.app_id,
search_filter,
sentence)
r = requests.get(url)
status_code = r.status_code
if status_code == 200:
print '[%s] ===> [%s] OK' %(self.mode, status_code)
self.save_text(r.text)
else:
raise RuntimeError("check it")
self.dedupe() # deduping text
def get_text_from_url(self):
'''
:: get text from url
'''
pass
def dedupe(self):
'''
:: dedupe text data
'''
print('deduping.....')
text_list = set()
with codecs.open(self.output_file, 'r',
encoding=self.text_encoding) as f:
for x in f.readlines() :
text_list.add(x)
for text in text_list:
with codecs.open(self.dedupe_output_file, 'a',
encoding=self.text_encoding) as g:
g.write(text)
print('cleaning up...')
os.remove(self.output_file)
def save_text(self, res_text):
'''
:: save useful information to txt file
:: returned by yahoo japanese analyze server
'''
for word in BS(res_text).findAll('word'):
category = word.find('pos').text
kanji = word.find('surface').text
hiragana = word.find('reading').text
try:
with codecs.open(self.output_file, 'a',
encoding=self.text_encoding) as f:
text = '%s\t%s\t%s' %(category, kanji, hiragana)
f.write(text + '\r\n')
except Exception as e:
os.remove(self.output_file)
raise RuntimeError("Error", e)
@staticmethod
def get_japanese_meaning(kanji):
'''
:: get japanese meaning from kotobank
'''
		url = 'https://kotobank.jp/word/%s' % kanji
try:
japanese_meaning = BS(requests.get(url).text).\
find('meta', {'property':'og:description'})['content']
except:
japanese_meaning = 'errors!'
return japanese_meaning
@staticmethod
def get_response(url):
'''
:: staticmethod -> get BS response from url
'''
return BS(requests.get(url).content)
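# Adding another news source (illustrative sketch, not part of the original;
# the class name and URL below are placeholders): subclass JapTextParser and
# yield plain sentences from get_text_from_url(), as AsahiParser does below.
#
#     class ExampleParser(JapTextParser):
#         def get_text_from_url(self):
#             soup = self.get_response('http://example.com/editorial')
#             for p_tag in soup.findAll('p'):
#                 yield p_tag.text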
class AsahiParser(JapTextParser):
'''
:: AsahiParser class
'''
def get_text_from_url(self):
'''
:: override function from base class
'''
if self.mode == 'editorial':
url = setting.asahi_editorial_url
else:
url = setting.asahi_tensheng_url
soup = self.get_response(url)
div_tag = soup.find('div', {'class': 'ArticleText'})
for p_tag in div_tag.findAll('p'):
yield p_tag.text
class NikkeiParser(JapTextParser):
'''
:: NikkeiParser class
'''
def get_text_from_url(self):
'''
:: override function from base class
:: get the lastest 2 editorial pages
'''
nikkei_main_url = setting.nikkei_main_url
soup_main = self.get_response(nikkei_main_url).\
findAll('h4', {'class': 'cmn-article_title'})[:2]
for s in soup_main:
nikkei_editorial_url = urljoin(setting.nikkei_host_url, s.find('a')['href'])
soup_editorial = self.get_response(nikkei_editorial_url).\
find('div', {'class': 'cmn-article_text JSID_key_fonttxt'})
			for p_tag in soup_editorial.findAll('p'):
				yield p_tag.text
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-t','--tensheng',
help='fetch asahi tensheng',
action="store_true")
parser.add_argument('-e','--editorial',
help='fetch asahi editorial',
action="store_true")
parser.add_argument('-n','--nikkei',
help='fetch nikkei editorial',
action="store_true")
args = parser.parse_args()
if args.tensheng:
a = AsahiParser(mode='tensheng')
a.analyze_japanese_text()
elif args.editorial:
a = AsahiParser()
a.analyze_japanese_text()
elif args.nikkei:
n = NikkeiParser()
n.analyze_japanese_text()
else:
parser.print_help()
exit(1)
| mit | -3,033,339,715,961,497,000 | 29.133333 | 80 | 0.665007 | false | 3.147998 | false | false | false |
znick/anytask | anytask/tasks/models.py | 1 | 16245 | # coding: utf-8
import copy
import sys
import json
from datetime import timedelta
from django.utils import timezone
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Q, Max
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.utils.html import escape
from courses.models import Course
from groups.models import Group
def check_json(text):
try:
text_to_json = json.loads(text, strict=False)
if not isinstance(text_to_json, dict):
raise ValueError
return text_to_json
except (ValueError, TypeError):
return False
def get_lang_text(text, lang):
text_ = check_json(text)
if text_:
lang = lang if lang in text_ else settings.LANGUAGE_CODE
return text_[lang]
return unicode(text)
class Task(models.Model):
title = models.CharField(max_length=191, db_index=True, null=True, blank=True)
short_title = models.CharField(max_length=15, db_index=True, null=True, blank=True)
course = models.ForeignKey(Course, db_index=True, null=False, blank=False)
group = models.ForeignKey(Group, db_index=False, null=True, blank=True, default=None)
groups = models.ManyToManyField(Group, blank=False, related_name='groups_set')
weight = models.IntegerField(db_index=True, null=False, blank=False, default=0)
is_hidden = models.BooleanField(db_index=True, null=False, blank=False, default=False)
parent_task = models.ForeignKey('self', db_index=True, null=True, blank=True, related_name='children')
task_text = models.TextField(null=True, blank=True, default=None)
score_max = models.IntegerField(db_index=True, null=False, blank=False, default=0)
max_students = models.IntegerField(null=False, blank=False, default=0)
contest_integrated = models.BooleanField(db_index=False, null=False, blank=False, default=False)
rb_integrated = models.BooleanField(db_index=False, null=False, blank=False, default=False)
TYPE_FULL = 'All'
TYPE_SIMPLE = 'Only mark'
TYPE_SEMINAR = 'Seminar'
TYPE_MATERIAL = 'Material'
TYPE_IPYNB = 'Jupyter Notebook'
TASK_TYPE_CHOICES = (
(TYPE_FULL, _('s_obsuzhdeniem')),
(TYPE_SIMPLE, _('tolko_ocenka')),
(TYPE_MATERIAL, _('material')),
(TYPE_SEMINAR, _('seminar')),
(TYPE_IPYNB, _('jupyter notebook'))
)
type = models.CharField(db_index=False, max_length=128, choices=TASK_TYPE_CHOICES, default=TYPE_FULL)
added_time = models.DateTimeField(auto_now_add=True) # remove default=timezone.now
update_time = models.DateTimeField(auto_now=True) # remove default=timezone.now
deadline_time = models.DateTimeField(auto_now=False, blank=True, null=True, default=None)
updated_by = models.ForeignKey(User, db_index=False, null=True, blank=True)
contest_id = models.IntegerField(db_index=True, null=False, blank=False, default=0)
problem_id = models.CharField(max_length=128, db_index=True, null=True, blank=True)
send_to_users = models.BooleanField(db_index=False, null=False, blank=False, default=False)
sended_notify = models.BooleanField(db_index=True, null=False, blank=False, default=True)
one_file_upload = models.BooleanField(db_index=False, null=False, blank=False, default=False)
accepted_after_contest_ok = models.BooleanField(db_index=False, null=False, blank=False, default=False)
score_after_deadline = models.BooleanField(db_index=False, null=False, blank=False, default=True)
nb_assignment_name = models.CharField(max_length=255, null=True, blank=True)
def __unicode__(self):
return unicode(self.title)
def get_title(self, lang=settings.LANGUAGE_CODE):
return escape(get_lang_text(self.title, lang))
def get_description(self, lang=settings.LANGUAGE_CODE):
return get_lang_text(self.task_text, lang)
def is_text_json(self):
return check_json(self.task_text)
@property
def max_students_on_task(self):
return self.max_students or self.course.max_students_per_task or settings.PYTHONTASK_MAX_USERS_PER_TASK
def user_can_take_task(self, user):
for task_taken in TaskTaken.objects.filter(task=self):
task_taken.update_status()
if user.is_anonymous():
            return (False, 'You must be logged in')
if self.is_hidden:
            return (False, 'The task is hidden')
if not self.course.groups.filter(students=user).count():
            return (False, u'You must be enrolled in one of the course groups')
if Task.objects.filter(parent_task=self).count() > 0:
return (False, u'')
if TaskTaken.objects.filter(task=self).filter(user=user).filter(
Q(Q(status=TaskTaken.STATUS_TAKEN) | Q(status=TaskTaken.STATUS_SCORED))).count() != 0:
return (False, u'')
if self.parent_task is not None:
tasks = Task.objects.filter(parent_task=self.parent_task)
if TaskTaken.objects.filter(user=user).filter(task__in=tasks) \
.exclude(status=TaskTaken.STATUS_CANCELLED) \
.exclude(status=TaskTaken.STATUS_DELETED) \
.count() > 0:
                return (False, u'You have already taken another subtask of this task')
max_not_scored_tasks = self.course.max_not_scored_tasks or \
settings.PYTHONTASK_MAX_TASKS_WITHOUT_SCORE_PER_STUDENT
if max_not_scored_tasks:
if TaskTaken.objects.filter(user=user) \
.filter(task__course=self.course) \
.filter(status=TaskTaken.STATUS_TAKEN).count() >= max_not_scored_tasks:
                return (False, u'You have too many unscored tasks')
max_incomplete_tasks = self.course.max_incomplete_tasks or settings.PYTHONTASK_MAX_INCOMPLETE_TASKS
if max_incomplete_tasks:
all_scored = TaskTaken.objects.filter(user=user).filter(task__course=self.course) \
.filter(Q(Q(status=TaskTaken.STATUS_TAKEN) | Q(
status=TaskTaken.STATUS_SCORED)))
if sum(t.score != t.task.score_max for t in all_scored) + 1 > max_incomplete_tasks:
                return (False, u'You have too many unfinished tasks')
max_students = self.max_students_on_task or settings.PYTHONTASK_MAX_USERS_PER_TASK
if max_students:
if TaskTaken.objects.filter(task=self).filter(Q(Q(status=TaskTaken.STATUS_TAKEN) | Q(
status=TaskTaken.STATUS_SCORED))).count() >= max_students:
return (
False,
                    u'The task cannot be taken by more than %d students' % max_students)
try:
task_taken = TaskTaken.objects.filter(task=self).filter(user=user).get(status=TaskTaken.STATUS_BLACKLISTED)
blacklist_expired_date = task_taken.blacklisted_till
if blacklist_expired_date:
                return (False, u'You will be able to take this task starting from %s' % blacklist_expired_date.strftime("%d.%m.%Y"))
except TaskTaken.DoesNotExist:
pass
return (True, u'')
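    # Note (added): user_can_take_task() enforces, in order, task visibility,
    # membership in a course group, leaf-task-only taking, mutual exclusion of
    # sibling subtasks, the per-user unscored/incomplete limits, the per-task
    # student limit, and the blacklist cool-down.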
def user_can_cancel_task(self, user):
if user.is_anonymous() or self.is_hidden:
return False
if TaskTaken.objects.filter(task=self).filter(user=user).filter(status=TaskTaken.STATUS_TAKEN).count() != 0:
return True
return False
def user_can_score_task(self, user):
if user.is_anonymous():
return False
return self.course.user_is_teacher(user)
def user_can_pass_task(self, user):
if user.is_anonymous():
return False
if not self.course.is_python_task:
if self.user_can_take_task(user):
return True
try:
task_taken = self.get_task_takens().get(user=user)
return (task_taken.status == TaskTaken.STATUS_TAKEN or task_taken.status == TaskTaken.STATUS_SCORED)
except TaskTaken.DoesNotExist:
return False
return False
def has_parent(self):
return self.parent_task is not None
def has_subtasks(self):
return Task.objects.filter(parent_task=self).count() > 0
def get_subtasks(self):
return Task.objects.filter(parent_task=self)
def get_task_takens(self):
return TaskTaken.objects.filter(task=self).filter(
Q(Q(status=TaskTaken.STATUS_TAKEN) | Q(status=TaskTaken.STATUS_SCORED)))
def add_user_properties(self, user):
self.can_take = self.user_can_take_task(user)
self.can_cancel = self.user_can_cancel_task(user)
self.can_score = self.user_can_score_task(user)
self.can_pass = self.user_can_pass_task(user)
self.is_shown = not self.is_hidden or self.course.user_is_teacher(user)
def has_issue_access(self):
return self.type not in [self.TYPE_SIMPLE, self.TYPE_MATERIAL, self.TYPE_SEMINAR]
def set_position_in_new_group(self, groups=None):
if not groups:
groups = self.course.groups.all()
else:
for task_related in TaskGroupRelations.objects.filter(task=self).exclude(group__in=groups):
task_related.deleted = True
task_related.save()
for group in list(groups):
task_related, created = TaskGroupRelations.objects.get_or_create(task=self, group=group)
if created:
max_position = TaskGroupRelations.objects.filter(group=group).exclude(id=task_related.id) \
.aggregate(Max('position'))['position__max']
task_related.position = max_position + 1 if max_position is not None else 0
else:
task_related.deleted = False
task_related.save()
def get_url_in_course(self):
return reverse('courses.views.seminar_page', kwargs={'course_id': self.course_id, 'task_id': self.id})
class TaskLog(models.Model):
title = models.CharField(max_length=191, db_index=True, null=True, blank=True)
course = models.ForeignKey(Course, db_index=False, null=False, blank=False)
group = models.ForeignKey(Group, db_index=False, null=True, blank=True, default=None)
groups = models.ManyToManyField(Group, blank=False, related_name='groups_log_set')
weight = models.IntegerField(db_index=False, null=False, blank=False, default=0)
parent_task = models.ForeignKey('self', db_index=True, null=True, blank=True, related_name='parent_task_set')
task_text = models.TextField(null=True, blank=True, default=None)
score_max = models.IntegerField(db_index=False, null=False, blank=False, default=0)
contest_integrated = models.BooleanField(db_index=False, null=False, blank=False, default=False)
rb_integrated = models.BooleanField(db_index=False, null=False, blank=False, default=False)
TYPE_FULL = 'All'
TYPE_SIMPLE = 'Only mark'
TASK_TYPE_CHOICES = (
(TYPE_FULL, _(u's_obsuzhdeniem')),
(TYPE_SIMPLE, _(u'tolko_ocenka')),
)
type = models.CharField(db_index=False, max_length=128, choices=TASK_TYPE_CHOICES, default=TYPE_FULL)
added_time = models.DateTimeField(auto_now_add=True) # remove default=timezone.now
update_time = models.DateTimeField(auto_now=True) # remove default=timezone.now
deadline_time = models.DateTimeField(auto_now=False, null=True, default=None)
updated_by = models.ForeignKey(User, db_index=False, null=True, blank=True)
contest_id = models.IntegerField(db_index=True, null=False, blank=False, default=0)
problem_id = models.CharField(max_length=128, db_index=True, null=True, blank=True)
def __unicode__(self):
return unicode(self.title)
class TaskTaken(models.Model):
STATUS_TAKEN = 0
STATUS_CANCELLED = 1
STATUS_BLACKLISTED = 2
STATUS_SCORED = 3
STATUS_DELETED = 4
user = models.ForeignKey(User, db_index=True, null=False, blank=False)
task = models.ForeignKey(Task, db_index=True, null=False, blank=False)
issue = models.ForeignKey('issues.Issue', db_index=True, null=True, blank=False)
TASK_TAKEN_STATUSES = (
(STATUS_TAKEN, u'Task taken'),
(STATUS_CANCELLED, u'Task cancelled'),
(STATUS_BLACKLISTED, u'Task blacklisted'),
(STATUS_SCORED, u'Task scored'),
(STATUS_DELETED, u'TaskTaken deleted')
)
status = models.IntegerField(choices=TASK_TAKEN_STATUSES, db_index=True, blank=False, default=0)
EDIT = 'EDIT'
QUEUE = 'QUEUE'
OK = 'OK'
STATUS_CHECK_CHOICES = (
        (EDIT, u'Still being worked on'),
        (QUEUE, u'Awaiting review'),
        (OK, u'Task credited and/or no longer accepted'),
)
status_check = models.CharField(db_index=True, max_length=5, choices=STATUS_CHECK_CHOICES, default=EDIT)
taken_time = models.DateTimeField(blank=True, null=True)
blacklisted_till = models.DateTimeField(blank=True, null=True)
added_time = models.DateTimeField(auto_now_add=True) # remove default=timezone.now
update_time = models.DateTimeField(auto_now=True) # remove default=timezone.now
@property
def score(self):
self.update_status()
if not self.issue:
return 0
return self.issue.mark
def update_status(self):
if self.issue and abs(self.issue.mark) > sys.float_info.epsilon and self.status != self.STATUS_SCORED:
self.scored()
if not self.issue.get_byname('responsible_name'):
group = self.task.course.get_user_group(self.user)
if group:
default_teacher = self.task.course.get_default_teacher(group)
if default_teacher:
self.issue.set_byname('responsible_name', default_teacher, author=None)
def take(self):
self.status = self.STATUS_TAKEN
if self.taken_time is None:
self.taken_time = timezone.now()
self.save()
def cancel(self):
dt_from_taken_delta = timezone.now() - self.taken_time
if (dt_from_taken_delta.days) <= settings.PYTHONTASK_MAX_DAYS_TO_FULL_CANCEL:
self.taken_time = None
self.status = self.STATUS_CANCELLED
self.save()
def blacklist(self):
self.status = self.STATUS_BLACKLISTED
self.blacklisted_till = timezone.now() + timedelta(days=settings.PYTHONTASK_DAYS_DROP_FROM_BLACKLIST)
self.save()
def scored(self):
self.status = self.STATUS_SCORED
self.save()
def mark_deleted(self):
self.status = self.STATUS_DELETED
self.taken_time = None
self.blacklisted_till = None
self.save()
class Meta:
unique_together = (("user", "task"),)
def __unicode__(self):
return unicode(self.task) + " (" + unicode(self.user) + ")"
class TaskGroupRelations(models.Model):
task = models.ForeignKey(Task, db_index=False, null=False, blank=False)
group = models.ForeignKey(Group, db_index=False, null=False, blank=False)
position = models.IntegerField(db_index=False, null=False, blank=False, default=0)
deleted = models.BooleanField(db_index=False, null=False, blank=False, default=False)
class Meta:
unique_together = ("task", "group")
def __unicode__(self):
return ' '.join([unicode(self.task), unicode(self.group), unicode(self.position)])
def task_save_to_log_post_save(sender, instance, created, **kwargs):
task_log = TaskLog()
task_log_dict = copy.deepcopy(instance.__dict__)
task_log_dict['id'] = None
task_log.__dict__ = task_log_dict
task_log.sended_notify = False
task_log.save()
task_log.groups.add(*instance.groups.all())
# post_save.connect(task_save_to_log_post_save, sender=Task)
| mit | 2,597,945,272,377,163,300 | 39.148615 | 119 | 0.650166 | false | 3.371906 | false | false | false |
elishowk/django-poser | poser/utils/page.py | 1 | 2014 | # -*- coding: utf-8 -*-
from django.conf import settings
import re
APPEND_TO_SLUG = "-copy"
COPY_SLUG_REGEX = re.compile(r'^.*-copy(?:-(\d)*)?$')
def is_valid_page_slug(page, slug, site, path=None):
"""Validates given slug depending on settings.
"""
# Exclude the page with the publisher_state == page.PUBLISHER_STATE_DELETE
from poser.models.pagemodel import Page
qs = Page.objects.filter(site=site)
## Check for slugs
if qs.filter(slug=slug).count():
return False
## Check for path
if path and qs.filter(path=path).count():
return False
return True
def get_available_slug(page, new_slug=None):
"""Smart function generates slug for title if current title slug cannot be
used. Appends APPEND_TO_SLUG to slug and checks it again.
(Used in page copy function)
Returns: slug
"""
slug = new_slug or page.slug
# We need the full path for the title to check for conflicting urls
page.slug = slug
page.update_path()
path = page.path
# This checks for conflicting slugs/overwrite_url, for both published and unpublished pages
    # This is a simpler check than in page_resolver.is_valid_url, which
    # takes into account the actual page URL
if not is_valid_page_slug(page, slug, page.site, path):
# add nice copy attribute, first is -copy, then -copy-2, -copy-3, ....
match = COPY_SLUG_REGEX.match(slug)
if match:
try:
next = int(match.groups()[0]) + 1
slug = "-".join(slug.split('-')[:-1]) + "-%d" % next
except TypeError:
slug = slug + "-2"
else:
slug = slug + APPEND_TO_SLUG
return get_available_slug(page, slug)
else:
return slug
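# Example behaviour (added, assumed slugs): an existing slug 'about' becomes
# 'about-copy' on the first copy; because COPY_SLUG_REGEX captures the numeric
# suffix, later copies become 'about-copy-2', 'about-copy-3', and so on.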
def check_title_slugs(page):
"""Checks page slugs for duplicity if required, used after page move/
cut/paste.
"""
old_slug = page.slug
page.slug = get_available_slug(page)
if page.slug != old_slug:
page.save()
| agpl-3.0 | 1,039,450,840,049,646,100 | 31.483871 | 95 | 0.618669 | false | 3.628829 | false | false | false |
donkawechico/arguman.org | web/premises/views.py | 1 | 23259 | # -*- coding:utf-8 -*-
import json
from datetime import timedelta
from markdown2 import markdown
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.db.models import Max, Sum
from django.utils.timezone import now
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404, redirect
from django.template.loader import render_to_string
from django.views.generic import DetailView, TemplateView, CreateView, View
from django.views.generic.edit import UpdateView
from django.utils.translation import get_language
from django.db.models import Count
from blog.models import Post
from premises.models import Contention, Premise
from premises.forms import (ArgumentCreationForm, PremiseCreationForm,
PremiseEditForm, ReportForm)
from premises.signals import (added_premise_for_premise,
added_premise_for_contention,
reported_as_fallacy,
supported_a_premise)
from premises.templatetags.premise_tags import check_content_deletion
from premises.mixins import PaginationMixin, NextURLMixin
from newsfeed.models import Entry
from profiles.mixins import LoginRequiredMixin
from profiles.models import Profile
def get_ip_address(request):
return (request.META.get('HTTP_X_FORWARDED_FOR') or
request.META.get('REMOTE_ADDR'))
class ContentionDetailView(DetailView):
queryset = (Contention.objects
.select_related('user')
.prefetch_related('premises'))
context_object_name = 'contention'
def get_template_names(self):
view = self.request.GET.get("view")
name = ("list_view" if view == "list" else "tree_view")
return ["premises/%s.html" % name]
def get_parent(self):
premise_id = self.kwargs.get("premise_id")
if premise_id:
return get_object_or_404(Premise, id=premise_id)
def get_premises(self):
contention = self.get_parent() or self.get_object()
return contention.published_children()
def get_context_data(self, **kwargs):
contention = self.get_object()
edit_mode = (
self.request.user.is_superuser or
self.request.user.is_staff or
contention.user == self.request.user)
return super(ContentionDetailView, self).get_context_data(
premises=self.get_premises(),
parent_premise=self.get_parent(),
path=contention.get_absolute_url(),
edit_mode=edit_mode,
serialized=contention.serialize(),
**kwargs)
class ContentionJsonView(DetailView):
model = Contention
def render_to_response(self, context, **response_kwargs):
contention = self.get_object(self.get_queryset())
return HttpResponse(json.dumps({
"nodes": self.build_tree(contention, self.request.user),
}), content_type="application/json")
def build_tree(self, contention, user):
return {
"name": contention.title,
"parent": None,
"pk": contention.pk,
"owner": contention.owner,
"sources": contention.sources,
"is_singular": self.is_singular(contention),
"children": self.get_premises(contention, user)
}
def get_premises(self, contention, user, parent=None):
children = [{
"pk": premise.pk,
"name": premise.text,
"parent": parent.text if parent else None,
"reportable_by_authenticated_user": self.user_can_report(
premise, user),
"report_count": premise.reports.count(),
"user": {
"id": premise.user.id,
"username": premise.user.username,
"absolute_url": reverse("auth_profile",
args=[premise.user.username])
},
"sources": premise.sources,
"premise_type": premise.premise_class(),
"children": (self.get_premises(contention, user, parent=premise)
if premise.published_children().exists() else [])
} for premise in contention.published_premises(parent)]
return children
def user_can_report(self, premise, user):
if user.is_authenticated() and user != premise.user:
return not premise.reported_by(user)
return False
def is_singular(self, contention):
result = contention.premises.all().aggregate(
max_sibling=Max('sibling_count'))
return result['max_sibling'] <= 1
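# Shape of the serialized tree returned above (illustrative; values are made
# up): {"name": "<title>", "parent": None, "pk": 1, "is_singular": True,
# "children": [{"name": "<premise>", "premise_type": "...", "children": []}]}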
class HomeView(TemplateView, PaginationMixin):
template_name = "index.html"
tab_class = "featured"
paginate_by = 20
def get_context_data(self, **kwargs):
contentions = self.get_contentions()
if self.request.user.is_authenticated():
notifications_qs = self.get_unread_notifications()
notifications = list(notifications_qs)
self.mark_as_read(notifications_qs)
else:
notifications = None
return super(HomeView, self).get_context_data(
next_page_url=self.get_next_page_url(),
tab_class=self.tab_class,
notifications=notifications,
has_next_page=self.has_next_page(),
announcements=self.get_announcements(),
contentions=contentions, **kwargs)
def get_announcements(self):
return Post.objects.filter(is_announcement=True)
def get_unread_notifications(self):
return (self.request.user
.notifications
.filter(is_read=False)[:5])
def mark_as_read(self, notifications):
pks = notifications.values_list("id", flat=True)
(self.request.user
.notifications
.filter(id__in=pks)
.update(is_read=True))
def get_contentions(self, paginate=True):
contentions = (Contention
.objects
.language()
.filter(is_featured=True)
.order_by("-date_modification"))
if paginate:
contentions = (contentions[self.get_offset(): self.get_limit()])
return contentions
class NotificationsView(LoginRequiredMixin, HomeView):
template_name = "notifications.html"
def get_context_data(self, **kwargs):
notifications_qs = self.request.user.notifications.all()[:40]
notifications = list(notifications_qs)
self.mark_as_read(notifications_qs)
return super(HomeView, self).get_context_data(
notifications=notifications,
**kwargs)
class SearchView(HomeView):
tab_class = 'search'
template_name = 'search/search.html'
partial_templates = {
'contentions': 'search/contention.html',
'users': 'search/profile.html',
'premises' : 'search/premise.html'
}
method_mapping = {'contentions': "get_contentions",
'users': "get_users",
'premises': "get_premises"}
def dispatch(self, request, *args, **kwargs):
self.type = request.GET.get('type', 'contentions')
if not self.method_mapping.get(self.type):
raise Http404()
return super(SearchView, self).dispatch(request, *args, **kwargs)
def get_keywords(self):
return self.request.GET.get('keywords') or ""
def has_next_page(self):
method = getattr(self, self.method_mapping[self.type])
total = method().count()
return total > (self.get_offset() + self.paginate_by)
def get_search_bundle(self):
method = getattr(self, self.method_mapping[self.type])
return [{'template': self.partial_templates[self.type],
'object': item} for item in method()]
def get_context_data(self, **kwargs):
return super(SearchView, self).get_context_data(
results=self.get_search_bundle(),
**kwargs)
def get_next_page_url(self):
offset = self.get_offset() + self.paginate_by
return '?offset=%(offset)s&keywords=%(keywords)s&type=%(type)s' % {
"offset": offset,
"type": self.type,
"keywords": self.get_keywords()
}
def get_premises(self, paginate=True):
keywords = self.request.GET.get('keywords')
if not keywords or len(keywords) < 3:
result = Premise.objects.none()
else:
result = (Premise.objects.filter(
argument__language=get_language(),
text__contains=keywords))
if paginate:
result = result[self.get_offset():self.get_limit()]
return result
def get_users(self, paginate=True):
keywords = self.request.GET.get('keywords')
if not keywords or len(keywords) < 2:
result = Profile.objects.none()
else:
result = (Profile.objects.filter(
username__icontains=keywords))
if paginate:
result = result[self.get_offset():self.get_limit()]
return result
def get_contentions(self, paginate=True):
keywords = self.request.GET.get('keywords')
if not keywords or len(keywords) < 2:
result = Contention.objects.none()
else:
result = (Contention
.objects
.filter(title__icontains=keywords,
language=get_language()))
if paginate:
result = result[self.get_offset():self.get_limit()]
return result
class NewsView(HomeView):
tab_class = "news"
def get_contentions(self, paginate=True):
contentions = (
Contention
.objects
.language()
.filter(is_published=True)
)
if paginate:
contentions = contentions[self.get_offset():self.get_limit()]
return contentions
class StatsView(HomeView):
tab_class = "stats"
template_name = "stats.html"
partial_templates = {
Profile: "stats/profile.html",
Contention: "stats/contention.html",
Premise: "stats/premise.html",
}
method_mapping = {
"active_users": "get_active_users",
"supported_users": "get_supported_users",
"disgraced_users": "get_disgraced_users",
"supported_premises": "get_supported_premises",
"fallacy_premises": "get_fallacy_premises",
"crowded_contentions": "get_crowded_contentions",
}
time_ranges = [7, 30]
def get_context_data(self, **kwargs):
return super(StatsView, self).get_context_data(
stats=self.get_stats_bundle(),
stats_type=self.get_stats_type(),
days=self.days,
**kwargs)
def get_stats_type(self):
return self.request.GET.get("what")
def build_time_filters(self, date_field="date_creation"):
days = self.request.GET.get("days")
if not days or days == "all":
self.days = None
return {}
try:
days = int(days)
except (TypeError, ValueError):
days = None
if not days or days not in self.time_ranges:
raise Http404()
self.days = days
field_expression = "%s__gt" % date_field
return {
field_expression: timezone.now() - timedelta(days=days)
}
def get_stats_bundle(self):
stat_type = self.get_stats_type()
if stat_type not in self.method_mapping:
raise Http404()
method = getattr(self, self.method_mapping[stat_type])
return [
{
"template": self.partial_templates[type(item)],
"object": item
} for item in method()
]
def get_active_users(self):
return Profile.objects.annotate(
premise_count=Sum("premise"),
).filter(
premise_count__gt=0,
**self.build_time_filters(date_field="premise__date_creation")
).order_by("-premise_count")[:10]
def get_supported_users(self):
return Profile.objects.annotate(
supporter_count=Sum("premise__supporters"),
).filter(
supporter_count__gt=0,
**self.build_time_filters(date_field="premise__date_creation")
).order_by("-supporter_count")[:10]
def get_disgraced_users(self):
return Profile.objects.annotate(
report_count=Sum("premise__reports"),
).filter(
report_count__gt=0,
**self.build_time_filters(date_field="premise__date_creation")
).order_by("-report_count")[:10]
def get_supported_premises(self):
return Premise.objects.annotate(
supporter_count=Sum("supporters")
).filter(
argument__language=get_language(),
supporter_count__gt=0,
**self.build_time_filters(date_field="date_creation")
).order_by("-supporter_count")[:50]
def get_fallacy_premises(self):
return Premise.objects.annotate(
report_count=Sum("reports"),
).filter(
report_count__gt=0,
**self.build_time_filters(date_field="date_creation")
).order_by("-report_count")[:10]
def get_crowded_contentions(self):
return Contention.objects.annotate(
premise_count=Sum("premises"),
).filter(
language=get_language(),
premise_count__gt=0,
**self.build_time_filters(date_field="date_creation")
).order_by("-premise_count")[:10]
class UpdatedArgumentsView(HomeView):
tab_class = "updated"
def get_contentions(self, paginate=True):
contentions = (Contention
.objects
.filter(is_published=True)
.order_by('-date_modification'))
if paginate:
contentions = contentions[self.get_offset():self.get_limit()]
return contentions
class ControversialArgumentsView(HomeView):
tab_class = "controversial"
def get_contentions(self, paginate=True):
last_week = now() - timedelta(days=3)
contentions = (Contention
.objects
.annotate(num_children=Count('premises'))
.order_by('-num_children')
.filter(date_modification__gte=last_week))
if paginate:
return contentions[self.get_offset():self.get_limit()]
return contentions
class AboutView(TemplateView):
template_name = "about.html"
def get_text_file(self):
language = get_language()
return render_to_string("about-%s.md" % language)
def get_context_data(self, **kwargs):
content = markdown(self.get_text_file())
return super(AboutView, self).get_context_data(
content=content, **kwargs)
class TosView(TemplateView):
template_name = "tos.html"
def get_context_data(self, **kwargs):
content = markdown(render_to_string("tos.md"))
return super(TosView, self).get_context_data(
content=content, **kwargs)
class ArgumentCreationView(LoginRequiredMixin, CreateView):
template_name = "premises/new_contention.html"
form_class = ArgumentCreationForm
def form_valid(self, form):
form.instance.user = self.request.user
form.instance.ip_address = get_ip_address(self.request)
form.instance.language = get_language()
form.instance.is_published = True
response = super(ArgumentCreationView, self).form_valid(form)
form.instance.update_sibling_counts()
return response
class ArgumentUpdateView(LoginRequiredMixin, UpdateView):
template_name = "premises/edit_contention.html"
form_class = ArgumentCreationForm
def get_queryset(self):
contentions = Contention.objects.all()
if self.request.user.is_superuser:
return contentions
return contentions.filter(user=self.request.user)
def form_valid(self, form):
form.instance.user = self.request.user
response = super(ArgumentUpdateView, self).form_valid(form)
form.instance.update_sibling_counts()
return response
class ArgumentPublishView(LoginRequiredMixin, DetailView):
def get_queryset(self):
return Contention.objects.filter(user=self.request.user)
def post(self, request, slug):
contention = self.get_object()
contention.is_published = True
contention.save()
messages.info(request, u"Argument is published now.")
return redirect(contention)
class ArgumentUnpublishView(LoginRequiredMixin, DetailView):
def get_queryset(self):
return Contention.objects.filter(user=self.request.user)
def post(self, request, slug):
contention = self.get_object()
contention.is_published = False
contention.save()
messages.info(request, u"Argüman yayından kaldırıldı.")
return redirect(contention)
class ArgumentDeleteView(LoginRequiredMixin, DetailView):
def get_queryset(self):
return Contention.objects.filter(user=self.request.user)
def post(self, request, slug):
contention = self.get_object()
if check_content_deletion(contention):
# remove notification
Entry.objects.delete(contention.get_newsfeed_type(), contention.id)
contention.delete()
messages.info(request, u"Argument has been removed.")
return redirect("home")
else:
messages.info(request, u"Argument cannot be deleted.")
return redirect(contention)
delete = post
class PremiseEditView(LoginRequiredMixin, UpdateView):
template_name = "premises/edit_premise.html"
form_class = PremiseEditForm
def get_queryset(self):
premises = Premise.objects.all()
if self.request.user.is_superuser:
return premises
return premises.filter(user=self.request.user)
def form_valid(self, form):
response = super(PremiseEditView, self).form_valid(form)
form.instance.argument.update_sibling_counts()
return response
def get_context_data(self, **kwargs):
return super(PremiseEditView, self).get_context_data(**kwargs)
class PremiseCreationView(NextURLMixin, LoginRequiredMixin, CreateView):
template_name = "premises/new_premise.html"
form_class = PremiseCreationForm
def get_context_data(self, **kwargs):
return super(PremiseCreationView, self).get_context_data(
contention=self.get_contention(),
view=self.get_view_name(),
parent=self.get_parent(),
**kwargs)
def form_valid(self, form):
contention = self.get_contention()
form.instance.user = self.request.user
form.instance.argument = contention
form.instance.parent = self.get_parent()
form.instance.is_approved = True
form.instance.ip_address = get_ip_address(self.request)
form.save()
contention.update_sibling_counts()
if form.instance.parent:
added_premise_for_premise.send(sender=self,
premise=form.instance)
else:
added_premise_for_contention.send(sender=self,
premise=form.instance)
contention.date_modification = timezone.now()
contention.save()
return redirect(
form.instance.get_parent().get_absolute_url() +
self.get_next_parameter()
)
def get_contention(self):
return get_object_or_404(Contention, slug=self.kwargs['slug'])
def get_parent(self):
parent_pk = self.kwargs.get("pk")
if parent_pk:
return get_object_or_404(Premise, pk=parent_pk)
class PremiseSupportView(NextURLMixin, LoginRequiredMixin, View):
def get_premise(self):
premises = Premise.objects.exclude(user=self.request.user)
return get_object_or_404(premises, pk=self.kwargs['pk'])
def post(self, request, *args, **kwargs):
premise = self.get_premise()
premise.supporters.add(self.request.user)
supported_a_premise.send(sender=self, premise=premise,
user=self.request.user)
return redirect(
premise.get_parent().get_absolute_url() +
self.get_next_parameter() +
"#%s" % premise.pk
)
def get_contention(self):
return get_object_or_404(Contention, slug=self.kwargs['slug'])
class PremiseUnsupportView(PremiseSupportView):
def delete(self, request, *args, **kwargs):
premise = self.get_premise()
premise.supporters.remove(self.request.user)
return redirect(
premise.get_parent().get_absolute_url() +
self.get_next_parameter() +
"#%s" % premise.pk
)
post = delete
class PremiseDeleteView(LoginRequiredMixin, View):
def get_premise(self):
if self.request.user.is_staff:
premises = Premise.objects.all()
else:
premises = Premise.objects.filter(user=self.request.user)
return get_object_or_404(premises,
pk=self.kwargs['pk'])
def delete(self, request, *args, **kwargs):
premise = self.get_premise()
premise.delete()
premise.update_sibling_counts()
contention = self.get_contention()
if not contention.premises.exists():
contention.is_published = False
contention.save()
return redirect(contention)
post = delete
def get_contention(self):
return get_object_or_404(Contention, slug=self.kwargs['slug'])
class ReportView(NextURLMixin, LoginRequiredMixin, CreateView):
form_class = ReportForm
template_name = "premises/report.html"
def get_context_data(self, **kwargs):
return super(ReportView, self).get_context_data(
premise=self.get_premise(),
view=self.get_view_name(),
**kwargs)
def get_contention(self):
return get_object_or_404(Contention, slug=self.kwargs['slug'])
def get_premise(self):
return get_object_or_404(Premise, pk=self.kwargs['pk'])
def get_initial(self):
return {
'contention': self.get_contention(),
'premise': self.get_premise(),
'reporter': self.request.user
}
def form_valid(self, form):
contention = self.get_contention()
premise = self.get_premise()
form.instance.contention = contention
form.instance.premise = premise
form.instance.reporter = self.request.user
form.save()
reported_as_fallacy.send(sender=self, report=form.instance)
return redirect(
premise.get_parent().get_absolute_url() +
self.get_next_parameter() +
"#%s" % premise.pk
)
| mit | -2,597,903,517,027,247,000 | 32.458993 | 79 | 0.603122 | false | 3.853828 | false | false | false |
jnosal/seth | seth/tests/test_authentication.py | 1 | 2947 | from seth import auth
from seth.tests import IntegrationTestBase
from seth.classy.rest import generics
class DefaultAuthenticatedResource(generics.GenericApiView):
authentication_policy = None
def get(self, **kwargs):
return {}
class BaseAuthenticatedTestCase(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
config.register_resource(DefaultAuthenticatedResource, '/test_basic')
def test_default_setup(self):
r = self.app.get('/test_basic')
self.assertEqual(r.status_int, 200)
class TokenAuthenticationPolicy(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
class CheckQueryParamsResource(generics.GenericApiView):
authentication_policy = auth.SecretTokenAuthenticationPolicy
def get(self, **kwargs):
return {}
config.register_resource(CheckQueryParamsResource, '/test_token')
def test_no_token_in_params(self):
r = self.app.get('/test_token', expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_wrong_token_in_params(self):
r = self.app.get('/test_token?token=wrong_token', expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_correct_token_in_params_wrong_param_name(self):
r = self.app.get('/test_token?tokennamewrong=secret', expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_correct_token_param_name_and_value(self):
r = self.app.get('/test_token?token=secret')
self.assertEqual(r.status_int, 200)
class CheckHeaderAuthenticationPolicy(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
class AllowHeaderAuthPolicy(auth.HeaderAuthenticationPolicy):
header_name = 'My-Header'
header_secret = 'My-Value'
class CheckQueryParamsResourceSecond(generics.GenericApiView):
authentication_policy = AllowHeaderAuthPolicy
def get(self, **kwargs):
return {}
config.register_resource(CheckQueryParamsResourceSecond, '/test_header')
def test_no_header_in_request(self):
r = self.app.get('/test_header', headers={}, expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_header_in_request_but_incorrect_value(self):
r = self.app.get('/test_header', headers={'My-Header': '123'}, expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_value_in_header_but_wrong_header_name(self):
r = self.app.get('/test_header', headers={'Wrong': 'My-Value'}, expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_correct_header_name_and_value(self):
r = self.app.get('/test_header', headers={'My-Header': 'My-Value'}, expect_errors=True)
self.assertEqual(r.status_int, 200) | mit | 4,533,596,743,129,733,600 | 33.682353 | 95 | 0.673906 | false | 3.79768 | true | false | false |
foursquare/pants | tests/python/pants_test/java/test_nailgun_integration.py | 1 | 1196 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class TestNailgunIntegration(PantsRunIntegrationTest):
def test_scala_repl_helloworld_input(self):
"""Integration test to exercise possible closed-loop breakages in NailgunClient, NailgunSession
and InputReader.
"""
target = 'examples/src/scala/org/pantsbuild/example/hello/welcome'
pants_run = self.run_pants(
command=['repl', target, '--quiet'],
stdin_data=(
'import org.pantsbuild.example.hello.welcome.WelcomeEverybody\n'
'println(WelcomeEverybody("World" :: Nil).head)\n'
),
# Override the PANTS_CONFIG_FILES="pants.travis-ci.ini" used within TravisCI to enable
# nailgun usage for the purpose of exercising that stack in the integration test.
config={'DEFAULT': {'execution_strategy': 'nailgun'}}
)
self.assert_success(pants_run)
self.assertIn('Hello, World!', pants_run.stdout_data.splitlines())
| apache-2.0 | 4,745,170,059,667,853,000 | 43.296296 | 99 | 0.72408 | false | 3.761006 | true | false | false |
rustyhowell/raytracer_py | hitable.py | 1 | 2340 | from collections import namedtuple
from vector3 import Vec3, dot
from math import sqrt
from ray import Ray
HitRecord = namedtuple("HitRecord", ['t', 'p', 'normal', 'material'])
class Hitable:
def hit(self, ray_, t_min, t_max):
"""
Determine if the ray will hit the object
        :param ray_: the Ray to test for intersection
        :param t_min: lower bound on the ray parameter t
        :param t_max: upper bound on the ray parameter t
        :return: (True, HitRecord) if the ray hits the object, otherwise (False, None)
"""
raise NotImplemented("Override in subclass")
class Sphere(Hitable):
def __init__(self, center, radius, material):
self.center = center
self.radius = radius
self.material = material
def hit(self, ray_, t_min, t_max):
assert isinstance(ray_, Ray)
oc = ray_.origin - self.center
a = dot(ray_.direction, ray_.direction)
b = dot(oc, ray_.direction)
c = dot(oc, oc) - self.radius * self.radius
discriminant = b * b - a * c
if discriminant > 0.0:
temp = (-b - sqrt(b*b - a * c)) / a
if t_min < temp < t_max:
p = ray_.point_at_parameter(temp)
rec = HitRecord(t=temp,
p=p,
normal=(p - self.center) / self.radius,
material=self.material
)
return True, rec
temp = (-b + sqrt(b*b - a * c)) / a
if t_min < temp < t_max:
p = ray_.point_at_parameter(temp)
rec = HitRecord(t=temp,
p=p,
normal=(p - self.center) / self.radius,
material=self.material
)
return True, rec
return False, None
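    # Note (added): hit() solves |origin + t*direction - center|^2 = radius^2,
    # i.e. a quadratic a*t^2 + 2*b*t + c = 0 with a, b, c as computed above;
    # the nearer root is tried first and the farther root is the fallback.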
class HitableList(Hitable):
def __init__(self):
self.shapes = []
def append(self, shape):
self.shapes.append(shape)
def hit(self, ray_, t_min, t_max):
hit_anything = False
closest_so_far = t_max
rec = None
for shape in self.shapes:
hit, tmprec = shape.hit(ray_, t_min, closest_so_far)
if hit:
hit_anything = True
closest_so_far = tmprec.t
rec = tmprec
return hit_anything, rec
| mit | 6,299,270,001,141,219,000 | 29 | 71 | 0.478205 | false | 4.034483 | false | false | false |
PaddlePaddle/models | PaddleRec/dssm/infer.py | 1 | 1407 | import paddle.fluid as fluid
import numpy as np
import sys
import args
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)
def infer(args):
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
with fluid.scope_guard(fluid.Scope()):
infer_program, feed_target_names, fetch_vars = fluid.io.load_inference_model(args.model_dir, exe)
        # Construct synthetic test data
sample_size = 100
l_Qs = []
pos_l_Ds = []
for i in range(sample_size):
l_Q = np.random.rand(1, args.TRIGRAM_D)
l_Qs.append(l_Q)
l_D = np.random.rand(1, args.TRIGRAM_D)
pos_l_Ds.append(l_D)
res = []
for i in range(sample_size):
con_sim = exe.run(infer_program,
feed={"query": l_Qs[i].astype('float32').reshape(1,args.TRIGRAM_D),
"doc_pos": pos_l_Ds[i].astype('float32').reshape(1,args.TRIGRAM_D)},
fetch_list=fetch_vars,
return_numpy=True)
logger.info("query_doc_sim: {:.5f}".format(np.array(con_sim).reshape(-1,1)[0][0]))
if __name__ == "__main__":
import paddle
paddle.enable_static()
args = args.parse_args()
infer(args) | apache-2.0 | -6,892,686,518,800,901,000 | 31.465116 | 105 | 0.55914 | false | 3.266979 | false | false | false |
andres-liiver/IAPB13_suvendatud | Kodutoo_16/Kodutoo_16_Andres.py | 1 | 2985 | '''
Kodutoo 16
14.11.2014
Andres Liiver
'''
import time
from matplotlib import pyplot as plt
from Tund16gen import *
def timeFunc(func, *args):
start = time.clock()
func(*args)
return time.clock() - start
def linear_search(lst, num):
for item in lst:
if item == num:
return True
return False
def binary_search(lst, num, sort=False):
if sort:
lst = sorted(lst)
imin = 0
imax = len(lst)-1
while imax >= imin:
imid = (imin+imax) // 2
if lst[imid] == num:
return True
elif lst[imid] < num:
imin = imid + 1
else:
imax = imid - 1
return False
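# Quick sanity check (added, illustrative values):
#   binary_search([1, 3, 5, 7], 5)            -> True   (already sorted)
#   binary_search([7, 1, 5, 3], 7, sort=True) -> True   (sorted internally)
#   linear_search([1, 3, 5, 7], 4)            -> False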
def main():
linearTimes = []
binary1Times = []
binary2Times = []
ns = [2**i for i in range(1, 13)]
for n in ns:
lst, gen = gimme_my_input(n, "blah")
times = []
# linear search test
for i in range(len(lst)):
times.append(timeFunc(linear_search, lst, next(gen)))
avg_time = sum(times) / len(times)
linearTimes.append(avg_time)
# binary search test 1
times = []
sortedList = sorted(lst)
for i in range(len(lst)):
times.append(timeFunc(binary_search, sortedList, next(gen)))
avg_time = sum(times) / len(times)
binary1Times.append(avg_time)
# binary search test 2
times = []
for i in range(len(lst)):
times.append(timeFunc(binary_search, lst, next(gen), True))
avg_time = sum(times) / len(times)
binary2Times.append(avg_time)
# print table of results
print("| algorithm \t| n \t\t| time (s)")
print()
# print Linear Search
for i, n in enumerate(ns):
if n < 10000:
print("| {0} \t| {1} \t\t| {2:.8f}".format("Linear", n, linearTimes[i]))
else:
print("| {0} \t| {1} \t| {2:.8f}".format("Linear", n, linearTimes[i]))
print()
# print Binary Search (presorted)
for i, n in enumerate(ns):
if n < 10000:
print("| {0} | {1} \t\t| {2:.8f}".format("Bin (presort)", n, binary1Times[i]))
else:
print("| {0} | {1} \t| {2:.8f}".format("Bin (presort)", n, binary1Times[i]))
print()
# print Binary Search (sort)
for i, n in enumerate(ns):
if n < 10000:
print("| {0} \t| {1} \t\t| {2:.8f}".format("Bin (sort)", n, binary2Times[i]))
else:
print("| {0} \t| {1} \t| {2:.8f}".format("Bin (sort)", n, binary2Times[i]))
# plot the times
ax = plt.subplot()
ax.set_xlabel("n")
ax.set_xscale("log")
ax.set_ylabel("Time (s)")
ax.set_yscale("log")
ax.plot(ns, linearTimes, "r", label="Linear Search")
ax.plot(ns, binary1Times, "g", label="Binary Search (presorted)")
ax.plot(ns, binary2Times, "b", label="Binary Search (sort)")
ax.legend(loc="upper left", shadow=True);
plt.show()
if __name__ == "__main__":
main() | mit | -7,427,043,884,688,275,000 | 23.883333 | 90 | 0.529313 | false | 3.213132 | false | false | false |
shouya/thinking-dumps | automata/homework/project2/CYK.py | 1 | 4714 | '''
CYK algorithm for Context Free Language
Author: Chenguang Zhu
CS154, Stanford University
'''
import sys,traceback
import os
import string
maxProductionNum = 100 #max number of productions
VarNum = 4
production = [[0] * 3 for i in range(maxProductionNum+1)]
'''Productions in Chomsky Normal Form (CNF)
production[i][0] is the number for the variable (0~3, 0: S 1: A, 2: B, 3: C)
If this production is A->BC (two variables), then production[i][1] and production[i][2] will contain the numbers for these two variables
If this production is A->a (a single terminal), then production[i][1] will contain the number for the terminal (0 or 1, 0: a, 1: b), production[i][2]=-1'''
X = [[[False]*3 for i in range(10)] for j in range(10)]
'''X[i][j][s]=true if and only if variable s (0~3, 0: S 1: A, 2: B, 3: C) is in X_ij defined in CYK
Suppose the length of string to be processed is L, then 0<=i<=j<L '''
#check whether (a,b,c) exists in production
def existProd(a, b, c):
global production
for i in range(len(production)):
if ((production[i][0]==a) and
(production[i][1]==b) and
(production[i][2]==c)):
return True
return False
'''CYK algorithm
Calculate the array X
w is the string to be processed'''
def calcCYK(w):
global X
global VarNum
L=len(w)
X=[[[False]*VarNum for i in range(L)] for j in range(L)]
# X=[[[] for i in range(L)] for j in range(L)]
for x in range(L):
calc_cell_basic(x, w)
for dist in range(1,L):
calc_row(dist, L)
tmp = [[lengthify(i) for i in j] for j in X]
X = tmp
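# Illustrative trace (added): for w = 'ab' (0, 1 after conversion), X[0][0]
# holds the variables deriving 'a', X[1][1] those deriving 'b', and the
# dist=1 pass fills X[0][1] with every variable A having a production A->BC
# where B is in X[0][0] and C is in X[1][1].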
def calc_row(dist, l):
global X
for i in range(l - dist):
head = i
tail = i + dist
calc_cell(head, tail)
def lengthify(xs):
global VarNum
result = [False] * VarNum
i = 0
for x in xs:
result[i] = x
i += 1
return result
def calc_cell_basic(col, w):
global X
ww = w[col]
poss = [False] * VarNum
for i in range(7):
if existProd(i,ww,-1):
poss[i] = True
X[col][col] = poss
def prod(xs, ys):
result = []
for x in range(len(xs)):
for y in range(len(ys)):
if xs[x] and ys[y]:
for i in range(7):
if existProd(i, x, y):
result.append(i)
return result
def calc_cell(head, tail):
global X
poss = [False] * VarNum
for i in range(tail - head):
xs = X[head][head + i]
ys = X[head + i + 1][tail]
for i in prod(xs, ys):
poss[i] = True
X[head][tail] = poss
def Start(filename):
global X
global VarNum
global production
result=''
#read data case line by line from file
try:
br=open(filename,'r')
#example on Page 8 of lecture 15_CFL5
production=[[0]*3 for i in range(7)]
production[0][0]=0; production[0][1]=1; production[0][2]=2 #S->AB
production[1][0]=1; production[1][1]=2; production[1][2]=3 #A->BC
production[2][0]=1; production[2][1]=0; production[2][2]=-1 #A->a
production[3][0]=2; production[3][1]=1; production[3][2]=3 #B->AC
production[4][0]=2; production[4][1]=1; production[4][2]=-1 #B->b
production[5][0]=3; production[5][1]=0; production[5][2]=-1 #C->a
production[6][0]=3; production[6][1]=1; production[6][2]=-1 #C->b
result=''
#Read File Line By Line
for string in br:
string=string.strip()
print 'Processing '+string+'...'
length=len(string)
w=[0]*length
for i in range(length):
w[i]=ord(string[i])-ord('a') #convert 'a' to 0 and 'b' to 1
#Use CYK algorithm to calculate X
calcCYK(w)
#Get/print the full table X
for step in range(length-1,-1,-1):
for i in range(length-step):
j=i+step
for k in range(VarNum):
if (X[i][j][k]):
result=result+str(k)
result=result+' '
result=result+'\n'
#Close the input stream
br.close()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print "*** print_exception:"
traceback.print_exception(exc_type, exc_value, exc_traceback,limit=2, file=sys.stdout)
result=result+'error'
return result
def main(filepath):
return Start(filepath)
if __name__ == '__main__':
main(sys.argv[1])
| mit | 6,106,430,936,488,291,000 | 27.098765 | 157 | 0.530972 | false | 3.317382 | false | false | false |
nexusriot/cinder | cinder/volume/drivers/remotefs.py | 1 | 57137 | # Copyright (c) 2012 NetApp, Inc.
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import inspect
import json
import os
import re
import tempfile
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import six
from cinder import compute
from cinder import db
from cinder import exception
from cinder import utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.volume import driver
LOG = logging.getLogger(__name__)
nas_opts = [
# TODO(eharney): deprecate nas_ip and change this to nas_host
cfg.StrOpt('nas_ip',
default='',
help='IP address or Hostname of NAS system.'),
cfg.StrOpt('nas_login',
default='admin',
help='User name to connect to NAS system.'),
cfg.StrOpt('nas_password',
default='',
help='Password to connect to NAS system.',
secret=True),
cfg.IntOpt('nas_ssh_port',
default=22,
min=1, max=65535,
help='SSH port to use to connect to NAS system.'),
cfg.StrOpt('nas_private_key',
default='',
help='Filename of private key to use for SSH authentication.'),
cfg.StrOpt('nas_secure_file_operations',
default='auto',
help=('Allow network-attached storage systems to operate in a '
'secure environment where root level access is not '
'permitted. If set to False, access is as the root user '
'and insecure. If set to True, access is not as root. '
'If set to auto, a check is done to determine if this is '
'a new installation: True is used if so, otherwise '
'False. Default is auto.')),
cfg.StrOpt('nas_secure_file_permissions',
default='auto',
help=('Set more secure file permissions on network-attached '
'storage volume files to restrict broad other/world '
'access. If set to False, volumes are created with open '
'permissions. If set to True, volumes are created with '
'permissions for the cinder user and group (660). If '
'set to auto, a check is done to determine if '
'this is a new installation: True is used if so, '
'otherwise False. Default is auto.')),
cfg.StrOpt('nas_share_path',
default='',
help=('Path to the share to use for storing Cinder volumes. '
'For example: "/srv/export1" for an NFS server export '
'available at 10.0.5.10:/srv/export1 .')),
cfg.StrOpt('nas_mount_options',
default=None,
help=('Options used to mount the storage backend file system '
'where Cinder volumes are stored.')),
]
old_vol_type_opts = [cfg.DeprecatedOpt('glusterfs_sparsed_volumes'),
cfg.DeprecatedOpt('glusterfs_qcow2_volumes')]
volume_opts = [
cfg.StrOpt('nas_volume_prov_type',
default='thin',
choices=['thin', 'thick'],
deprecated_opts=old_vol_type_opts,
help=('Provisioning type that will be used when '
'creating volumes.')),
]
CONF = cfg.CONF
CONF.register_opts(nas_opts)
CONF.register_opts(volume_opts)
def locked_volume_id_operation(f, external=False):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named
with the id of the volume. This lock can be used by driver methods
to prevent conflicts with other operations modifying the same volume.
May be applied to methods that take a 'volume' or 'snapshot' argument.
"""
def lvo_inner1(inst, *args, **kwargs):
lock_tag = inst.driver_prefix
call_args = inspect.getcallargs(f, inst, *args, **kwargs)
if call_args.get('volume'):
volume_id = call_args['volume']['id']
elif call_args.get('snapshot'):
volume_id = call_args['snapshot']['volume']['id']
else:
err_msg = _('The decorated method must accept either a volume or '
'a snapshot object')
raise exception.VolumeBackendAPIException(data=err_msg)
@utils.synchronized('%s-%s' % (lock_tag, volume_id),
external=external)
def lvo_inner2():
return f(inst, *args, **kwargs)
return lvo_inner2()
return lvo_inner1
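# Note: the snapshot-related driver entry points defined in RemoteFSSnapDriver
# below (create_snapshot, delete_snapshot, create_volume_from_snapshot,
# create_cloned_volume, copy_volume_to_image) are wrapped with
# @locked_volume_id_operation so that concurrent operations on the same volume
# are serialized.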
class RemoteFSDriver(driver.LocalVD, driver.TransferVD, driver.BaseVD):
"""Common base for drivers that work like NFS."""
driver_volume_type = None
driver_prefix = 'remotefs'
volume_backend_name = None
SHARE_FORMAT_REGEX = r'.+:/.+'
def __init__(self, *args, **kwargs):
super(RemoteFSDriver, self).__init__(*args, **kwargs)
self.shares = {}
self._mounted_shares = []
self._execute_as_root = True
self._is_voldb_empty_at_startup = kwargs.pop('is_vol_db_empty', None)
if self.configuration:
self.configuration.append_config_values(nas_opts)
self.configuration.append_config_values(volume_opts)
def check_for_setup_error(self):
"""Just to override parent behavior."""
pass
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info.
:param volume: volume reference
:param connector: connector reference
"""
data = {'export': volume['provider_location'],
'name': volume['name']}
if volume['provider_location'] in self.shares:
data['options'] = self.shares[volume['provider_location']]
return {
'driver_volume_type': self.driver_volume_type,
'data': data,
'mount_point_base': self._get_mount_point_base()
}
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
super(RemoteFSDriver, self).do_setup(context)
# Validate the settings for our secure file options.
self.configuration.nas_secure_file_permissions = \
self.configuration.nas_secure_file_permissions.lower()
self.configuration.nas_secure_file_operations = \
self.configuration.nas_secure_file_operations.lower()
valid_secure_opts = ['auto', 'true', 'false']
secure_options = {'nas_secure_file_permissions':
self.configuration.nas_secure_file_permissions,
'nas_secure_file_operations':
self.configuration.nas_secure_file_operations}
for opt_name, opt_value in secure_options.items():
if opt_value not in valid_secure_opts:
err_parms = {'name': opt_name, 'value': opt_value}
msg = _("NAS config '%(name)s=%(value)s' invalid. Must be "
"'auto', 'true', or 'false'") % err_parms
LOG.error(msg)
raise exception.InvalidConfigurationValue(msg)
def _get_provisioned_capacity(self):
"""Returns the provisioned capacity.
Get the sum of sizes of volumes, snapshots and any other
files on the mountpoint.
"""
provisioned_size = 0.0
for share in self.shares.keys():
mount_path = self._get_mount_point_for_share(share)
out, _ = self._execute('du', '--bytes', mount_path,
run_as_root=True)
provisioned_size += int(out.split()[0])
return round(provisioned_size / units.Gi, 2)
def _get_mount_point_base(self):
"""Returns the mount point base for the remote fs.
This method facilitates returning mount point base
for the specific remote fs. Override this method
in the respective driver to return the entry to be
used while attach/detach using brick in cinder.
If not overridden then it returns None without
raising exception to continue working for cases
when not used with brick.
"""
LOG.debug("Driver specific implementation needs to return"
" mount_point_base.")
return None
def create_volume(self, volume):
"""Creates a volume.
:param volume: volume reference
:returns: provider_location update dict for database
"""
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
LOG.info(_LI('casted to %s'), volume['provider_location'])
self._do_create_volume(volume)
return {'provider_location': volume['provider_location']}
def _do_create_volume(self, volume):
"""Create a volume on given remote share.
:param volume: volume reference
"""
volume_path = self.local_path(volume)
volume_size = volume['size']
if getattr(self.configuration,
self.driver_prefix + '_sparsed_volumes'):
            # If the touch file exists, set the bootable flag for the volume.
            if os.path.isfile('/etc/cinder/recogimage'):
                LOG.debug('Setting bootable flag for the volume')
                volume['bootable'] = 1
                self._create_sparsed_file(volume_path, volume_size)
                # Do not try to change permissions of the file here, as we
                # are operating on a sym-link that is not local.
else:
self._create_sparsed_file(volume_path, volume_size)
self._set_rw_permissions(volume_path)
else:
self._create_regular_file(volume_path, volume_size)
self._set_rw_permissions(volume_path)
def _ensure_shares_mounted(self):
"""Look for remote shares in the flags and mount them locally."""
mounted_shares = []
self._load_shares_config(getattr(self.configuration,
self.driver_prefix +
'_shares_config'))
for share in self.shares.keys():
try:
self._ensure_share_mounted(share)
mounted_shares.append(share)
except Exception as exc:
LOG.error(_LE('Exception during mounting %s'), exc)
self._mounted_shares = mounted_shares
LOG.debug('Available shares %s', self._mounted_shares)
def delete_volume(self, volume):
"""Deletes a logical volume.
:param volume: volume reference
"""
if not volume['provider_location']:
LOG.warning(_LW('Volume %s does not have '
'provider_location specified, '
'skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
mounted_path = self.local_path(volume)
self._delete(mounted_path)
def ensure_export(self, ctx, volume):
"""Synchronously recreates an export for a logical volume."""
self._ensure_share_mounted(volume['provider_location'])
def create_export(self, ctx, volume, connector):
"""Exports the volume.
Can optionally return a dictionary of changes
to the volume object to be persisted.
"""
pass
def remove_export(self, ctx, volume):
"""Removes an export for a logical volume."""
pass
def delete_snapshot(self, snapshot):
"""Delete snapshot.
Do nothing for this driver, but allow manager to handle deletion
of snapshot in error state.
"""
pass
def _delete(self, path):
# Note(lpetrut): this method is needed in order to provide
# interoperability with Windows as it will be overridden.
self._execute('rm', '-f', path, run_as_root=self._execute_as_root)
def _create_sparsed_file(self, path, size):
"""Creates a sparse file of a given size in GiB."""
self._execute('truncate', '-s', '%sG' % size,
path, run_as_root=self._execute_as_root)
def _create_regular_file(self, path, size):
"""Creates a regular file of given size in GiB."""
block_size_mb = 1
block_count = size * units.Gi / (block_size_mb * units.Mi)
self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
'bs=%dM' % block_size_mb,
'count=%d' % block_count,
run_as_root=self._execute_as_root)
def _fallocate(self, path, size):
"""Creates a raw file of given size in GiB using fallocate."""
self._execute('fallocate', '--length=%sG' % size,
path, run_as_root=True)
def _create_qcow2_file(self, path, size_gb):
"""Creates a QCOW2 file of a given size in GiB."""
self._execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'preallocation=metadata',
path, str(size_gb * units.Gi),
run_as_root=self._execute_as_root)
def _set_rw_permissions(self, path):
"""Sets access permissions for given NFS path.
Volume file permissions are set based upon the value of
secure_file_permissions: 'true' sets secure access permissions and
'false' sets more open (insecure) access permissions.
:param path: the volume file path.
"""
if self.configuration.nas_secure_file_permissions == 'true':
permissions = '660'
LOG.debug('File path %(path)s is being set with permissions: '
'%(permissions)s',
{'path': path, 'permissions': permissions})
else:
permissions = 'ugo+rw'
LOG.warning(_LW('%(path)s is being set with open permissions: '
'%(perm)s'), {'path': path, 'perm': permissions})
self._execute('chmod', permissions, path,
run_as_root=self._execute_as_root)
def _set_rw_permissions_for_all(self, path):
"""Sets 666 permissions for the path."""
self._execute('chmod', 'ugo+rw', path,
run_as_root=self._execute_as_root)
def _set_rw_permissions_for_owner(self, path):
"""Sets read-write permissions to the owner for the path."""
self._execute('chmod', 'u+rw', path,
run_as_root=self._execute_as_root)
def local_path(self, volume):
"""Get volume path (mounted locally fs path) for given volume.
:param volume: volume reference
"""
remotefs_share = volume['provider_location']
return os.path.join(self._get_mount_point_for_share(remotefs_share),
volume['name'])
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
run_as_root = self._execute_as_root
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'],
run_as_root=run_as_root)
# NOTE (leseb): Set the virtual size of the image
# the raw conversion overwrote the destination file
# (which had the correct size)
# with the fetched glance image size,
# thus the initial 'size' parameter is not honored
# this sets the size to the one asked in the first place by the user
# and then verify the final virtual size
image_utils.resize_image(self.local_path(volume), volume['size'],
run_as_root=run_as_root)
data = image_utils.qemu_img_info(self.local_path(volume),
run_as_root=run_as_root)
virt_size = data.virtual_size / units.Gi
if virt_size != volume['size']:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=(_("Expected volume size was %d") % volume['size'])
+ (_(" but size is now %d") % virt_size))
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def _read_config_file(self, config_file):
# Returns list of lines in file
with open(config_file) as f:
return f.readlines()
def _load_shares_config(self, share_file=None):
self.shares = {}
if all((self.configuration.nas_ip,
self.configuration.nas_share_path)):
LOG.debug('Using nas_ip and nas_share_path configuration.')
nas_ip = self.configuration.nas_ip
nas_share_path = self.configuration.nas_share_path
share_address = '%s:%s' % (nas_ip, nas_share_path)
if not re.match(self.SHARE_FORMAT_REGEX, share_address):
                msg = _("Share %s ignored due to invalid format. Must "
                        "be of form address:/export. Please check the "
                        "nas_ip and nas_share_path settings.") % share_address
raise exception.InvalidConfigurationValue(msg)
self.shares[share_address] = self.configuration.nas_mount_options
elif share_file is not None:
LOG.debug('Loading shares from %s.', share_file)
for share in self._read_config_file(share_file):
# A configuration line may be either:
# host:/vol_name
# or
# host:/vol_name -o options=123,rw --other
if not share.strip():
# Skip blank or whitespace-only lines
continue
if share.startswith('#'):
continue
share_info = share.split(' ', 1)
# results in share_info =
# [ 'address:/vol', '-o options=123,rw --other' ]
share_address = share_info[0].strip()
# Replace \040 with a space, to support paths with spaces
share_address = share_address.replace("\\040", " ")
share_opts = None
if len(share_info) > 1:
share_opts = share_info[1].strip()
if not re.match(self.SHARE_FORMAT_REGEX, share_address):
LOG.error(_LE("Share %s ignored due to invalid format. "
"Must be of form address:/export."),
share_address)
continue
self.shares[share_address] = share_opts
LOG.debug("shares loaded: %s", self.shares)
def _get_mount_point_for_share(self, path):
raise NotImplementedError()
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
pass
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, update the stats first.
"""
if refresh or not self._stats:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.volume_backend_name
data['vendor_name'] = 'Open Source'
data['driver_version'] = self.get_version()
data['storage_protocol'] = self.driver_volume_type
self._ensure_shares_mounted()
global_capacity = 0
global_free = 0
for share in self._mounted_shares:
capacity, free, used = self._get_capacity_info(share)
global_capacity += capacity
global_free += free
data['total_capacity_gb'] = global_capacity / float(units.Gi)
data['free_capacity_gb'] = global_free / float(units.Gi)
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = False
self._stats = data
def _get_capacity_info(self, share):
raise NotImplementedError()
def _find_share(self, volume_size_in_gib):
raise NotImplementedError()
def _ensure_share_mounted(self, share):
raise NotImplementedError()
def secure_file_operations_enabled(self):
"""Determine if driver is operating in Secure File Operations mode.
The Cinder Volume driver needs to query if this driver is operating
in a secure file mode; check our nas_secure_file_operations flag.
"""
if self.configuration.nas_secure_file_operations == 'true':
return True
return False
def set_nas_security_options(self, is_new_cinder_install):
"""Determine the setting to use for Secure NAS options.
This method must be overridden by child wishing to use secure
NAS file operations. This base method will set the NAS security
options to false.
"""
doc_html = "http://docs.openstack.org/admin-guide-cloud" \
"/blockstorage_nfs_backend.html"
self.configuration.nas_secure_file_operations = 'false'
LOG.warning(_LW("The NAS file operations will be run as root: "
"allowing root level access at the storage backend. "
"This is considered an insecure NAS environment. "
"Please see %s for information on a secure NAS "
"configuration."),
doc_html)
self.configuration.nas_secure_file_permissions = 'false'
LOG.warning(_LW("The NAS file permissions mode will be 666 (allowing "
"other/world read & write access). This is considered "
"an insecure NAS environment. Please see %s for "
"information on a secure NFS configuration."),
doc_html)
def _determine_nas_security_option_setting(self, nas_option, mount_point,
is_new_cinder_install):
"""Determine NAS security option setting when 'auto' is assigned.
This method determines the final 'true'/'false' setting of an NAS
security option when the default value of 'auto' has been detected.
If the nas option isn't 'auto' then its current value is used.
:param nas_option: The NAS security option value loaded from config.
:param mount_point: Mount where indicator file is written.
:param is_new_cinder_install: boolean for new Cinder installation.
:return string: 'true' or 'false' for new option setting.
"""
if nas_option == 'auto':
# For auto detection, we first check to see if we have been
# through this process before by checking for the existence of
# the Cinder secure environment indicator file.
file_name = '.cinderSecureEnvIndicator'
file_path = os.path.join(mount_point, file_name)
if os.path.isfile(file_path):
nas_option = 'true'
LOG.info(_LI('Cinder secure environment '
'indicator file exists.'))
else:
# The indicator file does not exist. If it is a new
# installation, set to 'true' and create the indicator file.
if is_new_cinder_install:
nas_option = 'true'
try:
with open(file_path, 'w') as fh:
fh.write('Detector file for Cinder secure '
'environment usage.\n')
fh.write('Do not delete this file.\n')
# Set the permissions on our special marker file to
# protect from accidental removal (owner write only).
self._execute('chmod', '640', file_path,
run_as_root=False)
LOG.info(_LI('New Cinder secure environment indicator'
' file created at path %s.'), file_path)
except IOError as err:
LOG.error(_LE('Failed to created Cinder secure '
'environment indicator file: %s'),
err)
else:
# For existing installs, we default to 'false'. The
# admin can always set the option at the driver config.
nas_option = 'false'
return nas_option
class RemoteFSSnapDriver(RemoteFSDriver, driver.SnapshotVD):
"""Base class for remotefs drivers implementing qcow2 snapshots.
Driver must implement:
_local_volume_dir(self, volume)
"""
def __init__(self, *args, **kwargs):
self._remotefsclient = None
self.base = None
self._nova = None
super(RemoteFSSnapDriver, self).__init__(*args, **kwargs)
def do_setup(self, context):
super(RemoteFSSnapDriver, self).do_setup(context)
self._nova = compute.API()
def _local_volume_dir(self, volume):
share = volume['provider_location']
local_dir = self._get_mount_point_for_share(share)
return local_dir
def _local_path_volume(self, volume):
path_to_disk = os.path.join(
self._local_volume_dir(volume),
volume['name'])
return path_to_disk
def _get_new_snap_path(self, snapshot):
vol_path = self.local_path(snapshot['volume'])
snap_path = '%s.%s' % (vol_path, snapshot['id'])
return snap_path
def _local_path_volume_info(self, volume):
return '%s%s' % (self.local_path(volume), '.info')
def _read_file(self, filename):
"""This method is to make it easier to stub out code for testing.
Returns a string representing the contents of the file.
"""
with open(filename, 'r') as f:
return f.read()
def _write_info_file(self, info_path, snap_info):
if 'active' not in snap_info.keys():
msg = _("'active' must be present when writing snap_info.")
raise exception.RemoteFSException(msg)
with open(info_path, 'w') as f:
json.dump(snap_info, f, indent=1, sort_keys=True)
def _qemu_img_info_base(self, path, volume_name, basedir):
"""Sanitize image_utils' qemu_img_info.
This code expects to deal only with relative filenames.
"""
info = image_utils.qemu_img_info(path)
if info.image:
info.image = os.path.basename(info.image)
if info.backing_file:
backing_file_template = \
"(%(basedir)s/[0-9a-f]+/)?%" \
"(volname)s(.(tmp-snap-)?[0-9a-f-]+)?$" % {
'basedir': basedir,
'volname': volume_name
}
if not re.match(backing_file_template, info.backing_file):
msg = _("File %(path)s has invalid backing file "
"%(bfile)s, aborting.") % {'path': path,
'bfile': info.backing_file}
raise exception.RemoteFSException(msg)
info.backing_file = os.path.basename(info.backing_file)
return info
def _qemu_img_info(self, path, volume_name):
raise NotImplementedError()
def _img_commit(self, path):
self._execute('qemu-img', 'commit', path,
run_as_root=self._execute_as_root)
self._delete(path)
def _rebase_img(self, image, backing_file, volume_format):
self._execute('qemu-img', 'rebase', '-u', '-b', backing_file, image,
'-F', volume_format, run_as_root=self._execute_as_root)
def _read_info_file(self, info_path, empty_if_missing=False):
"""Return dict of snapshot information.
:param: info_path: path to file
:param: empty_if_missing: True=return empty dict if no file
"""
if not os.path.exists(info_path):
if empty_if_missing is True:
return {}
return json.loads(self._read_file(info_path))
def _get_backing_chain_for_path(self, volume, path):
"""Returns list of dicts containing backing-chain information.
Includes 'filename', and 'backing-filename' for each
applicable entry.
Consider converting this to use --backing-chain and --output=json
when environment supports qemu-img 1.5.0.
:param volume: volume reference
:param path: path to image file at top of chain
"""
output = []
info = self._qemu_img_info(path, volume['name'])
new_info = {}
new_info['filename'] = os.path.basename(path)
new_info['backing-filename'] = info.backing_file
output.append(new_info)
while new_info['backing-filename']:
filename = new_info['backing-filename']
path = os.path.join(self._local_volume_dir(volume), filename)
info = self._qemu_img_info(path, volume['name'])
backing_filename = info.backing_file
new_info = {}
new_info['filename'] = filename
new_info['backing-filename'] = backing_filename
output.append(new_info)
return output
def _get_hash_str(self, base_str):
"""Return a string that represents hash of base_str.
Returns string in a hex format.
"""
if isinstance(base_str, six.text_type):
base_str = base_str.encode('utf-8')
return hashlib.md5(base_str).hexdigest()
def _get_mount_point_for_share(self, share):
"""Return mount point for share.
:param share: example 172.18.194.100:/var/fs
"""
return self._remotefsclient.get_mount_point(share)
def _get_available_capacity(self, share):
"""Calculate available space on the share.
:param share: example 172.18.194.100:/var/fs
"""
mount_point = self._get_mount_point_for_share(share)
out, _ = self._execute('df', '--portability', '--block-size', '1',
mount_point,
run_as_root=self._execute_as_root)
out = out.splitlines()[1]
size = int(out.split()[1])
available = int(out.split()[3])
return available, size
def _get_capacity_info(self, remotefs_share):
available, size = self._get_available_capacity(remotefs_share)
return size, available, size - available
def _get_mount_point_base(self):
return self.base
def _ensure_share_writable(self, path):
"""Ensure that the Cinder user can write to the share.
If not, raise an exception.
:param path: path to test
:raises: RemoteFSException
:returns: None
"""
prefix = '.cinder-write-test-' + str(os.getpid()) + '-'
try:
tempfile.NamedTemporaryFile(prefix=prefix, dir=path)
except OSError:
msg = _('Share at %(dir)s is not writable by the '
'Cinder volume service. Snapshot operations will not be '
'supported.') % {'dir': path}
raise exception.RemoteFSException(msg)
def _copy_volume_to_image(self, context, volume, image_service,
image_meta):
"""Copy the volume to the specified image."""
# If snapshots exist, flatten to a temporary image, and upload it
active_file = self.get_active_image_from_info(volume)
active_file_path = os.path.join(self._local_volume_dir(volume),
active_file)
info = self._qemu_img_info(active_file_path, volume['name'])
backing_file = info.backing_file
root_file_fmt = info.file_format
tmp_params = {
'prefix': '%s.temp_image.%s' % (volume['id'], image_meta['id']),
'suffix': '.img'
}
with image_utils.temporary_file(**tmp_params) as temp_path:
if backing_file or (root_file_fmt != 'raw'):
# Convert due to snapshots
# or volume data not being stored in raw format
# (upload_volume assumes raw format input)
image_utils.convert_image(active_file_path, temp_path, 'raw')
upload_path = temp_path
else:
upload_path = active_file_path
image_utils.upload_volume(context,
image_service,
image_meta,
upload_path)
def get_active_image_from_info(self, volume):
"""Returns filename of the active image from the info file."""
info_file = self._local_path_volume_info(volume)
snap_info = self._read_info_file(info_file, empty_if_missing=True)
if not snap_info:
# No info file = no snapshots exist
vol_path = os.path.basename(self.local_path(volume))
return vol_path
return snap_info['active']
def _create_cloned_volume(self, volume, src_vref):
LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s'),
{'src': src_vref['id'],
'dst': volume['id']})
if src_vref['status'] != 'available':
msg = _("Volume status must be 'available'.")
raise exception.InvalidVolume(msg)
volume_name = CONF.volume_name_template % volume['id']
volume_info = {'provider_location': src_vref['provider_location'],
'size': src_vref['size'],
'id': volume['id'],
'name': volume_name,
'status': src_vref['status']}
temp_snapshot = {'volume_name': volume_name,
'size': src_vref['size'],
'volume_size': src_vref['size'],
'name': 'clone-snap-%s' % src_vref['id'],
'volume_id': src_vref['id'],
'id': 'tmp-snap-%s' % src_vref['id'],
'volume': src_vref}
self._create_snapshot(temp_snapshot)
try:
self._copy_volume_from_snapshot(temp_snapshot,
volume_info,
volume['size'])
finally:
self._delete_snapshot(temp_snapshot)
return {'provider_location': src_vref['provider_location']}
def _delete_stale_snapshot(self, snapshot):
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
snapshot_file = snap_info[snapshot['id']]
active_file = self.get_active_image_from_info(snapshot['volume'])
snapshot_path = os.path.join(
self._local_volume_dir(snapshot['volume']), snapshot_file)
if (snapshot_file == active_file):
return
LOG.info(_LI('Deleting stale snapshot: %s'), snapshot['id'])
self._delete(snapshot_path)
del(snap_info[snapshot['id']])
self._write_info_file(info_path, snap_info)
def _delete_snapshot(self, snapshot):
"""Delete a snapshot.
If volume status is 'available', delete snapshot here in Cinder
using qemu-img.
If volume status is 'in-use', calculate what qcow2 files need to
merge, and call to Nova to perform this operation.
:raises: InvalidVolume if status not acceptable
:raises: RemoteFSException(msg) if operation fails
:returns: None
"""
LOG.debug('Deleting snapshot %s:', snapshot['id'])
volume_status = snapshot['volume']['status']
if volume_status not in ['available', 'in-use']:
msg = _('Volume status must be "available" or "in-use".')
raise exception.InvalidVolume(msg)
vol_path = self._local_volume_dir(snapshot['volume'])
self._ensure_share_writable(vol_path)
# Determine the true snapshot file for this snapshot
# based on the .info file
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path, empty_if_missing=True)
if snapshot['id'] not in snap_info:
# If snapshot info file is present, but snapshot record does not
# exist, do not attempt to delete.
# (This happens, for example, if snapshot_create failed due to lack
# of permission to write to the share.)
LOG.info(_LI('Snapshot record for %s is not present, allowing '
'snapshot_delete to proceed.'), snapshot['id'])
return
snapshot_file = snap_info[snapshot['id']]
LOG.debug('snapshot_file for this snap is: %s', snapshot_file)
snapshot_path = os.path.join(
self._local_volume_dir(snapshot['volume']),
snapshot_file)
snapshot_path_img_info = self._qemu_img_info(
snapshot_path,
snapshot['volume']['name'])
base_file = snapshot_path_img_info.backing_file
if base_file is None:
# There should always be at least the original volume
# file as base.
LOG.warning(_LW('No backing file found for %s, allowing '
'snapshot to be deleted.'), snapshot_path)
# Snapshot may be stale, so just delete it and update the
# info file instead of blocking
return self._delete_stale_snapshot(snapshot)
base_path = os.path.join(vol_path, base_file)
base_file_img_info = self._qemu_img_info(base_path,
snapshot['volume']['name'])
# Find what file has this as its backing file
active_file = self.get_active_image_from_info(snapshot['volume'])
active_file_path = os.path.join(vol_path, active_file)
if volume_status == 'in-use':
# Online delete
context = snapshot['context']
new_base_file = base_file_img_info.backing_file
base_id = None
for key, value in snap_info.items():
if value == base_file and key != 'active':
base_id = key
break
if base_id is None:
# This means we are deleting the oldest snapshot
LOG.debug('No %(base_id)s found for %(file)s',
{'base_id': 'base_id', 'file': snapshot_file})
online_delete_info = {
'active_file': active_file,
'snapshot_file': snapshot_file,
'base_file': base_file,
'base_id': base_id,
'new_base_file': new_base_file
}
return self._delete_snapshot_online(context,
snapshot,
online_delete_info)
if snapshot_file == active_file:
# There is no top file
# T0 | T1 |
# base | snapshot_file | None
# (guaranteed to| (being deleted, |
# exist) | commited down) |
self._img_commit(snapshot_path)
# Active file has changed
snap_info['active'] = base_file
else:
# T0 | T1 | T2 | T3
# base | snapshot_file | higher_file | highest_file
# (guaranteed to | (being deleted, | (guaranteed to | (may exist)
# exist, not | commited down) | exist, needs |
# used here) | | ptr update) |
backing_chain = self._get_backing_chain_for_path(
snapshot['volume'], active_file_path)
# This file is guaranteed to exist since we aren't operating on
# the active file.
higher_file = next((os.path.basename(f['filename'])
for f in backing_chain
if f.get('backing-filename', '') ==
snapshot_file),
None)
if higher_file is None:
msg = _('No file found with %s as backing file.') %\
snapshot_file
raise exception.RemoteFSException(msg)
higher_id = next((i for i in snap_info
if snap_info[i] == higher_file
and i != 'active'),
None)
if higher_id is None:
msg = _('No snap found with %s as backing file.') %\
higher_file
raise exception.RemoteFSException(msg)
self._img_commit(snapshot_path)
higher_file_path = os.path.join(vol_path, higher_file)
base_file_fmt = base_file_img_info.file_format
self._rebase_img(higher_file_path, base_file, base_file_fmt)
# Remove snapshot_file from info
del(snap_info[snapshot['id']])
self._write_info_file(info_path, snap_info)
def _create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
Snapshot must not be the active snapshot. (offline)
"""
if snapshot['status'] != 'available':
msg = _('Snapshot status must be "available" to clone.')
raise exception.InvalidSnapshot(msg)
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
self._do_create_volume(volume)
self._copy_volume_from_snapshot(snapshot,
volume,
volume['size'])
return {'provider_location': volume['provider_location']}
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
raise NotImplementedError()
def _do_create_snapshot(self, snapshot, backing_filename,
new_snap_path):
"""Create a QCOW2 file backed by another file.
:param snapshot: snapshot reference
:param backing_filename: filename of file that will back the
new qcow2 file
:param new_snap_path: filename of new qcow2 file
"""
backing_path_full_path = os.path.join(
self._local_volume_dir(snapshot['volume']),
backing_filename)
command = ['qemu-img', 'create', '-f', 'qcow2', '-o',
'backing_file=%s' % backing_path_full_path, new_snap_path]
self._execute(*command, run_as_root=self._execute_as_root)
info = self._qemu_img_info(backing_path_full_path,
snapshot['volume']['name'])
backing_fmt = info.file_format
command = ['qemu-img', 'rebase', '-u',
'-b', backing_filename,
'-F', backing_fmt,
new_snap_path]
self._execute(*command, run_as_root=self._execute_as_root)
self._set_rw_permissions(new_snap_path)
def _create_snapshot(self, snapshot):
"""Create a snapshot.
If volume is attached, call to Nova to create snapshot, providing a
qcow2 file. Cinder creates and deletes qcow2 files, but Nova is
responsible for transitioning the VM between them and handling live
transfers of data between files as required.
If volume is detached, create locally with qemu-img. Cinder handles
manipulation of qcow2 files.
A file named volume-<uuid>.info is stored with the volume
data and is a JSON table which contains a mapping between
Cinder snapshot UUIDs and filenames, as these associations
will change as snapshots are deleted.
Basic snapshot operation:
1. Initial volume file:
volume-1234
2. Snapshot created:
volume-1234 <- volume-1234.aaaa
volume-1234.aaaa becomes the new "active" disk image.
If the volume is not attached, this filename will be used to
attach the volume to a VM at volume-attach time.
If the volume is attached, the VM will switch to this file as
part of the snapshot process.
Note that volume-1234.aaaa represents changes after snapshot
'aaaa' was created. So the data for snapshot 'aaaa' is actually
in the backing file(s) of volume-1234.aaaa.
This file has a qcow2 header recording the fact that volume-1234 is
its backing file. Delta changes since the snapshot was created are
stored in this file, and the backing file (volume-1234) does not
change.
info file: { 'active': 'volume-1234.aaaa',
'aaaa': 'volume-1234.aaaa' }
3. Second snapshot created:
volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb
volume-1234.bbbb now becomes the "active" disk image, recording
changes made to the volume.
info file: { 'active': 'volume-1234.bbbb', (* changed!)
'aaaa': 'volume-1234.aaaa',
'bbbb': 'volume-1234.bbbb' } (* added!)
4. Snapshot deletion when volume is attached ('in-use' state):
* When first snapshot is deleted, Cinder calls Nova for online
snapshot deletion. Nova deletes snapshot with id "aaaa" and
makes snapshot with id "bbbb" point to the base image.
Snapshot with id "bbbb" is the active image.
volume-1234 <- volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb',
'bbbb': 'volume-1234.bbbb'
}
* When second snapshot is deleted, Cinder calls Nova for online
snapshot deletion. Nova deletes snapshot with id "bbbb" by
pulling volume-1234's data into volume-1234.bbbb. This
(logically) removes snapshot with id "bbbb" and the active
file remains the same.
volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb' }
TODO (deepakcs): Change this once Nova supports blockCommit for
in-use volumes.
5. Snapshot deletion when volume is detached ('available' state):
* When first snapshot is deleted, Cinder does the snapshot
deletion. volume-1234.aaaa is removed from the snapshot chain.
The data from it is merged into its parent.
volume-1234.bbbb is rebased, having volume-1234 as its new
parent.
volume-1234 <- volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb',
'bbbb': 'volume-1234.bbbb'
}
* When second snapshot is deleted, Cinder does the snapshot
deletion. volume-1234.aaaa is removed from the snapshot chain.
The base image, volume-1234 becomes the active image for this
volume again.
volume-1234
info file: { 'active': 'volume-1234' } (* changed!)
"""
status = snapshot['volume']['status']
if status not in ['available', 'in-use']:
msg = _('Volume status must be "available" or "in-use"'
' for snapshot. (is %s)') % status
raise exception.InvalidVolume(msg)
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path, empty_if_missing=True)
backing_filename = self.get_active_image_from_info(
snapshot['volume'])
new_snap_path = self._get_new_snap_path(snapshot)
if status == 'in-use':
self._create_snapshot_online(snapshot,
backing_filename,
new_snap_path)
else:
self._do_create_snapshot(snapshot,
backing_filename,
new_snap_path)
snap_info['active'] = os.path.basename(new_snap_path)
snap_info[snapshot['id']] = os.path.basename(new_snap_path)
self._write_info_file(info_path, snap_info)
def _create_snapshot_online(self, snapshot, backing_filename,
new_snap_path):
# Perform online snapshot via Nova
context = snapshot['context']
self._do_create_snapshot(snapshot,
backing_filename,
new_snap_path)
connection_info = {
'type': 'qcow2',
'new_file': os.path.basename(new_snap_path),
'snapshot_id': snapshot['id']
}
try:
result = self._nova.create_volume_snapshot(
context,
snapshot['volume_id'],
connection_info)
LOG.debug('nova call result: %s', result)
except Exception:
LOG.exception(_LE('Call to Nova to create snapshot failed'))
raise
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
# An update of progress = '90%' means that Nova is done
seconds_elapsed = 0
increment = 1
timeout = 600
while True:
s = db.snapshot_get(context, snapshot['id'])
if s['status'] == 'creating':
if s['progress'] == '90%':
# Nova tasks completed successfully
break
time.sleep(increment)
seconds_elapsed += increment
elif s['status'] == 'error':
msg = _('Nova returned "error" status '
'while creating snapshot.')
raise exception.RemoteFSException(msg)
LOG.debug('Status of snapshot %(id)s is now %(status)s',
{'id': snapshot['id'],
'status': s['status']})
if 10 < seconds_elapsed <= 20:
increment = 2
elif 20 < seconds_elapsed <= 60:
increment = 5
elif 60 < seconds_elapsed:
increment = 10
if seconds_elapsed > timeout:
msg = _('Timed out while waiting for Nova update '
'for creation of snapshot %s.') % snapshot['id']
raise exception.RemoteFSException(msg)
def _delete_snapshot_online(self, context, snapshot, info):
# Update info over the course of this method
# active file never changes
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path)
if info['active_file'] == info['snapshot_file']:
# blockRebase/Pull base into active
# info['base'] => snapshot_file
file_to_delete = info['base_file']
if info['base_id'] is None:
# Passing base=none to blockRebase ensures that
# libvirt blanks out the qcow2 backing file pointer
new_base = None
else:
new_base = info['new_base_file']
snap_info[info['base_id']] = info['snapshot_file']
delete_info = {'file_to_merge': new_base,
'merge_target_file': None, # current
'type': 'qcow2',
'volume_id': snapshot['volume']['id']}
del(snap_info[snapshot['id']])
else:
# blockCommit snapshot into base
# info['base'] <= snapshot_file
# delete record of snapshot
file_to_delete = info['snapshot_file']
delete_info = {'file_to_merge': info['snapshot_file'],
'merge_target_file': info['base_file'],
'type': 'qcow2',
'volume_id': snapshot['volume']['id']}
del(snap_info[snapshot['id']])
try:
self._nova.delete_volume_snapshot(
context,
snapshot['id'],
delete_info)
except Exception:
LOG.exception(_LE('Call to Nova delete snapshot failed'))
raise
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
# An update of progress = '90%' means that Nova is done
seconds_elapsed = 0
increment = 1
timeout = 7200
while True:
s = db.snapshot_get(context, snapshot['id'])
if s['status'] == 'deleting':
if s['progress'] == '90%':
# Nova tasks completed successfully
break
else:
LOG.debug('status of snapshot %s is still "deleting"... '
'waiting', snapshot['id'])
time.sleep(increment)
seconds_elapsed += increment
else:
msg = _('Unable to delete snapshot %(id)s, '
'status: %(status)s.') % {'id': snapshot['id'],
'status': s['status']}
raise exception.RemoteFSException(msg)
if 10 < seconds_elapsed <= 20:
increment = 2
elif 20 < seconds_elapsed <= 60:
increment = 5
elif 60 < seconds_elapsed:
increment = 10
if seconds_elapsed > timeout:
msg = _('Timed out while waiting for Nova update '
'for deletion of snapshot %(id)s.') %\
{'id': snapshot['id']}
raise exception.RemoteFSException(msg)
# Write info file updated above
self._write_info_file(info_path, snap_info)
# Delete stale file
path_to_delete = os.path.join(
self._local_volume_dir(snapshot['volume']), file_to_delete)
self._execute('rm', '-f', path_to_delete, run_as_root=True)
@locked_volume_id_operation
def create_snapshot(self, snapshot):
"""Apply locking to the create snapshot operation."""
return self._create_snapshot(snapshot)
@locked_volume_id_operation
def delete_snapshot(self, snapshot):
"""Apply locking to the delete snapshot operation."""
return self._delete_snapshot(snapshot)
@locked_volume_id_operation
def create_volume_from_snapshot(self, volume, snapshot):
return self._create_volume_from_snapshot(volume, snapshot)
@locked_volume_id_operation
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
return self._create_cloned_volume(volume, src_vref)
@locked_volume_id_operation
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
return self._copy_volume_to_image(context,
volume,
image_service,
image_meta)
| apache-2.0 | -5,967,509,913,570,102,000 | 38.459254 | 109 | 0.548926 | false | 4.404301 | true | false | false |
Connexions/openstax-cms | news/models.py | 1 | 15456 | from bs4 import BeautifulSoup
from django.db import models
from django import forms
from wagtail.core.models import Page, Orderable
from wagtail.core.fields import RichTextField, StreamField
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel, InlinePanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.documents.edit_handlers import DocumentChooserPanel
from wagtail.embeds.blocks import EmbedBlock
from wagtail.search import index
from wagtail.core import blocks
from wagtail.core.blocks import TextBlock, StructBlock, StreamBlock, FieldBlock, CharBlock, RichTextBlock, RawHTMLBlock
from wagtail.images.blocks import ImageChooserBlock
from wagtail.documents.blocks import DocumentChooserBlock
from wagtail.snippets.blocks import SnippetChooserBlock
from wagtail.snippets.edit_handlers import SnippetChooserPanel
from wagtail.snippets.models import register_snippet
from wagtail.api import APIField
from wagtail.images.api.fields import ImageRenditionField
from wagtail.core.models import Site
from modelcluster.fields import ParentalKey
from modelcluster.contrib.taggit import ClusterTaggableManager
from taggit.models import TaggedItemBase
from openstax.functions import build_image_url
from snippets.models import NewsSource
class ImageChooserBlock(ImageChooserBlock):
def get_api_representation(self, value, context=None):
if value:
return {
'id': value.id,
'title': value.title,
'original': value.get_rendition('original').attrs_dict,
}
class PullQuoteBlock(StructBlock):
quote = TextBlock("quote title")
attribution = CharBlock()
class Meta:
icon = "openquote"
class ImageFormatChoiceBlock(FieldBlock):
field = forms.ChoiceField(choices=(
('left', 'Wrap left'), ('right', 'Wrap right'), ('mid', 'Mid width'), ('full', 'Full width'),
))
class HTMLAlignmentChoiceBlock(FieldBlock):
field = forms.ChoiceField(choices=(
('normal', 'Normal'), ('full', 'Full width'),
))
class ImageBlock(StructBlock):
image = ImageChooserBlock()
caption = RichTextBlock()
alignment = ImageFormatChoiceBlock()
alt_text = blocks.CharBlock(required=False)
class AlignedHTMLBlock(StructBlock):
html = RawHTMLBlock()
alignment = HTMLAlignmentChoiceBlock()
class Meta:
icon = "code"
class BlogStreamBlock(StreamBlock):
paragraph = RichTextBlock(icon="pilcrow")
aligned_image = ImageBlock(label="Aligned image", icon="image")
pullquote = PullQuoteBlock()
aligned_html = AlignedHTMLBlock(icon="code", label='Raw HTML')
document = DocumentChooserBlock(icon="doc-full-inverse")
embed = EmbedBlock(icon="media", label="Embed Media URL")
class NewsIndex(Page):
intro = RichTextField(blank=True)
press_kit = models.ForeignKey(
'wagtaildocs.Document',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
promote_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
@property
def articles(self):
articles = NewsArticle.objects.live().child_of(self)
article_data = {}
for article in articles:
article_data['{}'.format(article.slug)] = {
'detail_url': '/apps/cms/api/v2/pages/{}/'.format(article.pk),
'date': article.date,
'heading': article.heading,
'subheading': article.subheading,
'body_blurb': article.first_paragraph,
'pin_to_top': article.pin_to_top,
'article_image': article.article_image,
'article_image_alt': article.featured_image_alt_text,
'author': article.author,
'tags': [tag.name for tag in article.tags.all()],
}
return article_data
content_panels = Page.content_panels + [
FieldPanel('intro', classname="full"),
DocumentChooserPanel('press_kit'),
]
promote_panels = [
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('search_description'),
ImageChooserPanel('promote_image')
]
api_fields = [
APIField('intro'),
APIField('press_kit'),
APIField('articles'),
APIField('slug'),
APIField('seo_title'),
APIField('search_description'),
APIField('promote_image')
]
subpage_types = ['news.NewsArticle']
parent_page_types = ['pages.HomePage']
max_count = 1
def get_sitemap_urls(self, request=None):
return [
{
'location': '{}/blog/'.format(Site.find_for_request(request).root_url),
'lastmod': (self.last_published_at or self.latest_revision_created_at),
}
]
class NewsArticleTag(TaggedItemBase):
content_object = ParentalKey('news.NewsArticle', related_name='tagged_items')
class NewsArticle(Page):
date = models.DateField("Post date")
heading = models.CharField(max_length=250, help_text="Heading displayed on website")
subheading = models.CharField(max_length=250, blank=True, null=True)
author = models.CharField(max_length=250)
featured_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text="Image should be 1200 x 600"
)
featured_image_alt_text = models.CharField(max_length=250, blank=True, null=True)
def get_article_image(self):
return build_image_url(self.featured_image)
article_image = property(get_article_image)
tags = ClusterTaggableManager(through=NewsArticleTag, blank=True)
body = StreamField(BlogStreamBlock())
pin_to_top = models.BooleanField(default=False)
promote_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
@property
def first_paragraph(self):
paragraphs = []
for block in self.body:
if block.block_type == 'paragraph':
paragraphs.append(str(block.value))
first_paragraph_parsed = []
soup = BeautifulSoup(paragraphs[0], "html.parser")
for tag in soup.findAll('p'):
first_paragraph_parsed.append(tag)
return str(first_paragraph_parsed[0])
search_fields = Page.search_fields + [
index.SearchField('body'),
index.SearchField('tags'),
]
content_panels = Page.content_panels + [
FieldPanel('date'),
FieldPanel('title'),
FieldPanel('heading'),
FieldPanel('subheading'),
FieldPanel('author'),
ImageChooserPanel('featured_image'),
FieldPanel('featured_image_alt_text'),
FieldPanel('tags'),
StreamFieldPanel('body'),
FieldPanel('pin_to_top'),
]
promote_panels = [
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('search_description'),
ImageChooserPanel('promote_image')
]
api_fields = [
APIField('date'),
APIField('title'),
APIField('heading'),
APIField('subheading'),
APIField('author'),
APIField('article_image'),
APIField('featured_image_small', serializer=ImageRenditionField('width-420', source='featured_image')),
APIField('featured_image_alt_text'),
APIField('tags'),
APIField('body'),
APIField('pin_to_top'),
APIField('slug'),
APIField('seo_title'),
APIField('search_description'),
APIField('promote_image')
]
parent_page_types = ['news.NewsIndex']
def save(self, *args, **kwargs):
if self.pin_to_top:
current_pins = self.__class__.objects.filter(pin_to_top=True)
for pin in current_pins:
if pin != self:
pin.pin_to_top = False
pin.save()
return super(NewsArticle, self).save(*args, **kwargs)
def get_sitemap_urls(self, request=None):
return [
{
'location': '{}/blog/{}/'.format(Site.find_for_request(request).root_url, self.slug),
'lastmod': (self.last_published_at or self.latest_revision_created_at),
}
]
class Experts(models.Model):
name = models.CharField(max_length=255)
email = models.EmailField(blank=True, null=True)
title = models.CharField(max_length=255)
bio = models.TextField()
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
def get_expert_image(self):
return build_image_url(self.image)
expert_image = property(get_expert_image)
api_fields = [
APIField('name'),
APIField('email'),
APIField('title'),
APIField('bio'),
APIField('expert_image')
]
panels = [
FieldPanel('name'),
FieldPanel('email'),
FieldPanel('title'),
FieldPanel('bio'),
ImageChooserPanel('image'),
]
class ExpertsBios(Orderable, Experts):
experts_bios = ParentalKey('news.PressIndex', related_name='experts_bios')
class NewsMentionChooserBlock(SnippetChooserBlock):
def get_api_representation(self, value, context=None):
if value:
return {
'id': value.id,
'name': value.name,
'logo': value.news_logo,
}
class NewsMentionBlock(blocks.StructBlock):
source = NewsMentionChooserBlock(NewsSource)
url = blocks.URLBlock()
headline = blocks.CharBlock()
date = blocks.DateBlock()
class Meta:
icon = 'document'
class MissionStatement(models.Model):
statement = models.CharField(max_length=255)
api_fields = ('statement', )
class MissionStatements(Orderable, MissionStatement):
mission_statements = ParentalKey('news.PressIndex', related_name='mission_statements')
class PressIndex(Page):
press_kit = models.ForeignKey(
'wagtaildocs.Document',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
def get_press_kit(self):
return build_image_url(self.press_kit)
press_kit_url = property(get_press_kit)
press_inquiry_name = models.CharField(max_length=255, blank=True, null=True)
press_inquiry_phone = models.CharField(max_length=255)
press_inquiry_email = models.EmailField()
experts_heading = models.CharField(max_length=255)
experts_blurb = models.TextField()
mentions = StreamField([
('mention', NewsMentionBlock(icon='document')),
], null=True)
promote_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
def get_sitemap_urls(self, request=None):
return [
{
'location': '{}/press/'.format(Site.find_for_request(request).root_url),
'lastmod': (self.last_published_at or self.latest_revision_created_at),
}
]
@property
def releases(self):
releases = PressRelease.objects.live().child_of(self)
releases_data = {}
for release in releases:
releases_data['press/{}'.format(release.slug)] = {
'detail_url': '/apps/cms/api/v2/pages/{}/'.format(release.pk),
'date': release.date,
'heading': release.heading,
'excerpt': release.excerpt,
'author': release.author,
}
return releases_data
content_panels = Page.content_panels + [
DocumentChooserPanel('press_kit'),
FieldPanel('press_inquiry_name'),
FieldPanel('press_inquiry_phone'),
FieldPanel('press_inquiry_email'),
FieldPanel('experts_heading'),
FieldPanel('experts_blurb'),
InlinePanel('experts_bios', label="Experts"),
StreamFieldPanel('mentions'),
InlinePanel('mission_statements', label="Mission Statement"),
]
promote_panels = [
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('search_description'),
ImageChooserPanel('promote_image')
]
api_fields = [
APIField('press_kit'),
APIField('press_kit_url'),
APIField('releases'),
APIField('slug'),
APIField('seo_title'),
APIField('search_description'),
APIField('promote_image'),
APIField('experts_heading'),
APIField('experts_blurb'),
APIField('experts_bios'),
APIField('mentions'),
APIField('mission_statements'),
APIField('press_inquiry_name'),
APIField('press_inquiry_phone'),
APIField('press_inquiry_email')
]
subpage_types = ['news.PressRelease']
parent_page_types = ['pages.HomePage']
max_count = 1
class PressRelease(Page):
date = models.DateField("PR date")
heading = models.CharField(max_length=250, help_text="Heading displayed on website")
subheading = models.CharField(max_length=250, blank=True, null=True)
author = models.CharField(max_length=250)
featured_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
featured_image_alt_text = models.CharField(max_length=250, blank=True, null=True)
def get_article_image(self):
return build_image_url(self.featured_image)
article_image = property(get_article_image)
excerpt = models.CharField(max_length=255)
body = StreamField(BlogStreamBlock())
promote_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
def get_sitemap_urls(self, request=None):
return [
{
'location': '{}/press/{}'.format(Site.find_for_request(request).root_url, self.slug),
'lastmod': (self.last_published_at or self.latest_revision_created_at),
}
]
search_fields = Page.search_fields + [
index.SearchField('body'),
]
content_panels = Page.content_panels + [
FieldPanel('date'),
FieldPanel('title'),
FieldPanel('heading'),
FieldPanel('subheading'),
FieldPanel('author'),
ImageChooserPanel('featured_image'),
FieldPanel('featured_image_alt_text'),
FieldPanel('excerpt'),
StreamFieldPanel('body'),
]
promote_panels = [
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('search_description'),
ImageChooserPanel('promote_image')
]
api_fields = [
APIField('date'),
APIField('title'),
APIField('heading'),
APIField('subheading'),
APIField('author'),
APIField('article_image'),
APIField('featured_image_alt_text'),
APIField('excerpt'),
APIField('body'),
APIField('slug'),
APIField('seo_title'),
APIField('search_description'),
APIField('promote_image')
]
| agpl-3.0 | -7,732,224,415,095,894,000 | 29.788845 | 119 | 0.614777 | false | 3.938838 | false | false | false |
gonicus/gosa | common/src/gosa/common/components/mqtt_proxy.py | 1 | 3896 | # This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
import uuid
from tornado import gen
from gosa.common import Environment
from gosa.common.components.json_exception import JSONRPCException
from gosa.common.components.mqtt_handler import MQTTHandler
from gosa.common.gjson import dumps, loads
from tornado.concurrent import Future
class MQTTException(Exception):
pass
class MQTTServiceProxy(object):
"""
The MQTTServiceProxy provides a simple way to use GOsa RPC
services from various clients. Using the proxy object, you
can directly call methods without the need to know where
they actually get executed::
>>> from gosa.common.components.mqtt_proxy import MQTTServiceProxy
>>> proxy = MQTTServiceProxy('localhost')
>>> proxy.getMethods()
This will return a dictionary describing the available methods.
=============== ============
Parameter Description
=============== ============
mqttHandler MQTTHandler used to connect to the MQTT service broker
serviceAddress Address string describing the target queue to bind to, must be skipped if no special queue is needed
serviceName *internal*
methods *internal*
=============== ============
The MQTTService proxy creates a temporary MQTT *reply to* queue, which
is used for command results.
"""
worker = {}
def __init__(self, mqttHandler=None, serviceAddress=None, serviceName=None,
methods=None):
self.__handler = mqttHandler if mqttHandler is not None else MQTTHandler()
self.__serviceName = serviceName
self.__serviceAddress = serviceAddress
self.__methods = methods
self.env = Environment.getInstance()
# Retrieve methods
if self.__methods is None:
self.__serviceName = "getMethods"
self.__methods = self.__call__()
self.__serviceName = None
#pylint: disable=W0613
def login(self, user, password): # pragma: nocover
return True
def logout(self): # pragma: nocover
return True
def close(self): # pragma: nocover
pass
def getProxy(self):
return MQTTServiceProxy(self.__handler, self.__serviceAddress, None, methods=self.__methods)
def __getattr__(self, name):
if self.__serviceName is not None:
name = "%s/%s" % (self.__serviceName, name)
return MQTTServiceProxy(self.__handler, self.__serviceAddress, name, methods=self.__methods)
@gen.coroutine
def __call__(self, *args, **kwargs):
data = {}
if '__user__' in kwargs:
data['user'] = kwargs['__user__']
del kwargs['__user__']
if '__session_id__' in kwargs:
data['session_id'] = kwargs['__session_id__']
del kwargs['__session_id__']
# Default to 'core' queue
call_id = uuid.uuid4()
topic = "%s/%s" % (self.__serviceAddress, call_id)
if isinstance(self.__methods, Future):
self.__methods = yield self.__methods
if self.__methods and self.__serviceName not in self.__methods:
raise NameError("name '%s' not defined" % self.__serviceName)
# Send
data.update({
"method": self.__serviceName,
"id": "mqttrpc",
"sender": self.env.uuid
})
data["kwparams"] = kwargs
data["params"] = args
postdata = dumps(data)
response = yield self.__handler.send_sync_message(postdata, topic, qos=2)
resp = loads(response)
if 'error' in resp and resp['error'] is not None:
raise JSONRPCException(resp['error'])
return resp['result']
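# Usage sketch (illustrative only -- the service address and user below are
# made-up values; the real ones depend on the GOsa deployment):
#
# from tornado import gen
#
# @gen.coroutine
# def list_methods():
#     # building the proxy triggers an initial getMethods() round-trip
#     proxy = MQTTServiceProxy(serviceAddress="net.example/client")
#     # attribute access returns a callable proxy; the call itself is a
#     # coroutine, so it has to be yielded
#     methods = yield proxy.getMethods(__user__="admin")
#     return methods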
| lgpl-2.1 | 8,577,437,640,126,136,000 | 31.198347 | 120 | 0.61037 | false | 4.198276 | false | false | false |
Sightline-Networks/email_bar | email_bar.py | 1 | 1124 | #!/bin/python3
from argparse import ArgumentParser
from configparser import ConfigParser
from os.path import expanduser
from mailbox import MaildirMessage, Maildir
config = ConfigParser()
config_mailboxes = {}
config.read(expanduser('~/.config/email_bar.cfg'))
parser = ArgumentParser()
parser.add_argument('--only', help='only check specified mailbox', action='store')
parser.add_argument('--no-title', help='do not display the title', action='store_true')
args = parser.parse_args()
if args.only:
config_mailboxes[args.only] = 0
# Else read through the config and check all of the mailboxes
else:
for mailbox in config.sections():
config_mailboxes[mailbox] = 0
# Iter through and see what has not been read
for mailbox in config_mailboxes:
maildir = Maildir(config.get(mailbox, 'path'))
for mail in maildir:
if 'S' not in mail.get_flags():
config_mailboxes[mailbox] += 1
for mailbox in config_mailboxes.keys():
if args.no_title:
print(config_mailboxes[mailbox])
else:
print("%s: %s " % (mailbox, config_mailboxes[mailbox]))
| gpl-2.0 | -6,381,215,655,860,284,000 | 22.416667 | 87 | 0.689502 | false | 3.721854 | true | false | false |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/kafka/protocol/admin.py | 1 | 7824 | from __future__ import absolute_import
from .api import Request, Response
from .types import Array, Boolean, Bytes, Int16, Int32, Schema, String
class ApiVersionResponse_v0(Response):
API_KEY = 18
API_VERSION = 0
SCHEMA = Schema(
('error_code', Int16),
('api_versions', Array(
('api_key', Int16),
('min_version', Int16),
('max_version', Int16)))
)
class ApiVersionResponse_v1(Response):
API_KEY = 18
API_VERSION = 1
SCHEMA = Schema(
('error_code', Int16),
('api_versions', Array(
('api_key', Int16),
('min_version', Int16),
('max_version', Int16))),
('throttle_time_ms', Int32)
)
class ApiVersionRequest_v0(Request):
API_KEY = 18
API_VERSION = 0
RESPONSE_TYPE = ApiVersionResponse_v0
SCHEMA = Schema()
class ApiVersionRequest_v1(Request):
API_KEY = 18
API_VERSION = 1
RESPONSE_TYPE = ApiVersionResponse_v1
SCHEMA = ApiVersionRequest_v0.SCHEMA
ApiVersionRequest = [ApiVersionRequest_v0, ApiVersionRequest_v1]
ApiVersionResponse = [ApiVersionResponse_v0, ApiVersionResponse_v1]
class CreateTopicsResponse_v0(Response):
API_KEY = 19
API_VERSION = 0
SCHEMA = Schema(
('topic_error_codes', Array(
('topic', String('utf-8')),
('error_code', Int16)))
)
class CreateTopicsResponse_v1(Response):
API_KEY = 19
API_VERSION = 1
SCHEMA = Schema(
('topic_error_codes', Array(
('topic', String('utf-8')),
('error_code', Int16),
('error_message', String('utf-8'))))
)
class CreateTopicsResponse_v2(Response):
API_KEY = 19
API_VERSION = 2
SCHEMA = Schema(
('throttle_time_ms', Int32),
('topic_error_codes', Array(
('topic', String('utf-8')),
('error_code', Int16),
('error_message', String('utf-8'))))
)
class CreateTopicsRequest_v0(Request):
API_KEY = 19
API_VERSION = 0
RESPONSE_TYPE = CreateTopicsResponse_v0
SCHEMA = Schema(
('create_topic_requests', Array(
('topic', String('utf-8')),
('num_partitions', Int32),
('replication_factor', Int16),
('replica_assignment', Array(
('partition_id', Int32),
('replicas', Array(Int32)))),
('configs', Array(
('config_key', String('utf-8')),
('config_value', String('utf-8')))))),
('timeout', Int32)
)
class CreateTopicsRequest_v1(Request):
API_KEY = 19
API_VERSION = 1
RESPONSE_TYPE = CreateTopicsResponse_v1
SCHEMA = Schema(
('create_topic_requests', Array(
('topic', String('utf-8')),
('num_partitions', Int32),
('replication_factor', Int16),
('replica_assignment', Array(
('partition_id', Int32),
('replicas', Array(Int32)))),
('configs', Array(
('config_key', String('utf-8')),
('config_value', String('utf-8')))))),
('timeout', Int32),
('validate_only', Boolean)
)
class CreateTopicsRequest_v2(Request):
API_KEY = 19
API_VERSION = 2
RESPONSE_TYPE = CreateTopicsResponse_v2
SCHEMA = CreateTopicsRequest_v1.SCHEMA
CreateTopicsRequest = [
CreateTopicsRequest_v0, CreateTopicsRequest_v1, CreateTopicsRequest_v2
]
CreateTopicsResponse = [
CreateTopicsResponse_v0, CreateTopicsResponse_v1, CreateTopicsResponse_v2
]
class DeleteTopicsResponse_v0(Response):
API_KEY = 20
API_VERSION = 0
SCHEMA = Schema(
('topic_error_codes', Array(
('topic', String('utf-8')),
('error_code', Int16)))
)
class DeleteTopicsResponse_v1(Response):
API_KEY = 20
API_VERSION = 1
SCHEMA = Schema(
('throttle_time_ms', Int32),
('topic_error_codes', Array(
('topic', String('utf-8')),
('error_code', Int16)))
)
class DeleteTopicsRequest_v0(Request):
API_KEY = 20
API_VERSION = 0
RESPONSE_TYPE = DeleteTopicsResponse_v0
SCHEMA = Schema(
('topics', Array(String('utf-8'))),
('timeout', Int32)
)
class DeleteTopicsRequest_v1(Request):
API_KEY = 20
API_VERSION = 1
RESPONSE_TYPE = DeleteTopicsResponse_v1
SCHEMA = DeleteTopicsRequest_v0.SCHEMA
DeleteTopicsRequest = [DeleteTopicsRequest_v0, DeleteTopicsRequest_v1]
DeleteTopicsResponse = [DeleteTopicsResponse_v0, DeleteTopicsResponse_v1]
class ListGroupsResponse_v0(Response):
API_KEY = 16
API_VERSION = 0
SCHEMA = Schema(
('error_code', Int16),
('groups', Array(
('group', String('utf-8')),
('protocol_type', String('utf-8'))))
)
class ListGroupsResponse_v1(Response):
API_KEY = 16
API_VERSION = 1
SCHEMA = Schema(
('throttle_time_ms', Int32),
('error_code', Int16),
('groups', Array(
('group', String('utf-8')),
('protocol_type', String('utf-8'))))
)
class ListGroupsRequest_v0(Request):
API_KEY = 16
API_VERSION = 0
RESPONSE_TYPE = ListGroupsResponse_v0
SCHEMA = Schema()
class ListGroupsRequest_v1(Request):
API_KEY = 16
API_VERSION = 1
RESPONSE_TYPE = ListGroupsResponse_v1
SCHEMA = ListGroupsRequest_v0.SCHEMA
ListGroupsRequest = [ListGroupsRequest_v0, ListGroupsRequest_v1]
ListGroupsResponse = [ListGroupsResponse_v0, ListGroupsResponse_v1]
class DescribeGroupsResponse_v0(Response):
API_KEY = 15
API_VERSION = 0
SCHEMA = Schema(
('groups', Array(
('error_code', Int16),
('group', String('utf-8')),
('state', String('utf-8')),
('protocol_type', String('utf-8')),
('protocol', String('utf-8')),
('members', Array(
('member_id', String('utf-8')),
('client_id', String('utf-8')),
('client_host', String('utf-8')),
('member_metadata', Bytes),
('member_assignment', Bytes)))))
)
class DescribeGroupsResponse_v1(Response):
API_KEY = 15
API_VERSION = 1
SCHEMA = Schema(
('throttle_time_ms', Int32),
('groups', Array(
('error_code', Int16),
('group', String('utf-8')),
('state', String('utf-8')),
('protocol_type', String('utf-8')),
('protocol', String('utf-8')),
('members', Array(
('member_id', String('utf-8')),
('client_id', String('utf-8')),
('client_host', String('utf-8')),
('member_metadata', Bytes),
('member_assignment', Bytes)))))
)
class DescribeGroupsRequest_v0(Request):
API_KEY = 15
API_VERSION = 0
RESPONSE_TYPE = DescribeGroupsResponse_v0
SCHEMA = Schema(
('groups', Array(String('utf-8')))
)
class DescribeGroupsRequest_v1(Request):
API_KEY = 15
API_VERSION = 1
RESPONSE_TYPE = DescribeGroupsResponse_v1
SCHEMA = DescribeGroupsRequest_v0.SCHEMA
DescribeGroupsRequest = [DescribeGroupsRequest_v0, DescribeGroupsRequest_v1]
DescribeGroupsResponse = [DescribeGroupsResponse_v0, DescribeGroupsResponse_v1]
class SaslHandShakeResponse_v0(Response):
API_KEY = 17
API_VERSION = 0
SCHEMA = Schema(
('error_code', Int16),
('enabled_mechanisms', Array(String('utf-8')))
)
class SaslHandShakeRequest_v0(Request):
API_KEY = 17
API_VERSION = 0
RESPONSE_TYPE = SaslHandShakeResponse_v0
SCHEMA = Schema(
('mechanism', String('utf-8'))
)
SaslHandShakeRequest = [SaslHandShakeRequest_v0]
SaslHandShakeResponse = [SaslHandShakeResponse_v0]
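# Illustrative sketch (not part of the protocol definitions): client code
# normally picks a concrete version from one of the lists above and fills in
# the SCHEMA fields, e.g.
#
# version = 0  # usually negotiated via ApiVersionRequest/ApiVersionResponse
# request = DeleteTopicsRequest[version](topics=['my-topic'], timeout=30000)
#
# The field names follow the Schema definitions above; see
# kafka.protocol.struct for the exact constructor/encode() contract.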
| mit | -3,200,656,330,581,579,300 | 25.255034 | 79 | 0.576176 | false | 3.564465 | true | false | false |
kylef/bluepaste | bluepaste/models.py | 1 | 2381 | import datetime
import json
from hashlib import sha1
import requests
import peewee
from rivr_peewee import Database
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import JsonLexer
from bluepaste.lexer import BlueprintLexer
from bluepaste.config import BLUEPRINT_PARSER_URL
database = Database()
EXPIRE_CHOICES = (
(600, 'In 10 minutes'),
(3600, 'In one hour'),
(3600*24, 'In one day'),
(3600*24*7, 'In one week'),
(3600*24*14, 'In two weeks'),
(3600*24*30, 'In one month'),
)
EXPIRE_DEFAULT = 3600*24*14
class User(database.Model):
email = peewee.CharField(unique=True)
class Blueprint(database.Model):
slug = peewee.CharField(max_length=40, unique=True)
expires = peewee.DateTimeField()
author = peewee.ForeignKeyField(User, related_name='blueprints', null=True)
def create_revision(self, message, content):
ast = requests.post(BLUEPRINT_PARSER_URL, data=content).json()['ast']
ast_json = json.dumps(ast)
created_at = datetime.datetime.now()
slug_content = '{}\n{}'.format(created_at.isoformat(), content)
slug = sha1(slug_content).hexdigest()
return Revision.create(blueprint=self, slug=slug, content=content, message=message, ast_json=ast_json)
@property
def last_revision(self):
return self.revisions[0]
class Revision(database.Model):
blueprint = peewee.ForeignKeyField(Blueprint, related_name='revisions')
slug = peewee.CharField(max_length=40, unique=True)
content = peewee.TextField()
created_at = peewee.DateTimeField(default=datetime.datetime.now)
ast_json = peewee.TextField()
message = peewee.TextField(null=True)
class Meta:
order_by = ('-created_at',)
indexes = (
(('blueprint', 'slug'), True),
)
def __str__(self):
return self.content
@property
def highlighted_content(self):
return highlight(self.content, BlueprintLexer(), HtmlFormatter())
@property
def ast(self):
if not hasattr(self, '_ast'):
self._ast = json.loads(self.ast_json)
return self._ast
@property
def highlighted_ast(self):
ast = json.dumps(self.ast, sort_keys=True, indent=2, separators=(',', ': '))
return highlight(ast, JsonLexer(), HtmlFormatter())
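# Usage sketch (illustrative; assumes a configured peewee database and a
# blueprint parser reachable at BLUEPRINT_PARSER_URL):
#
# blueprint = Blueprint.create(slug='abc123', expires=some_datetime)
# revision = blueprint.create_revision('Initial draft', apib_source)
# html = revision.highlighted_content  # Pygments-highlighted blueprint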
| mit | 5,149,579,032,192,973,000 | 27.686747 | 110 | 0.660227 | false | 3.663077 | false | false | false |
jacquerie/inspire-dojson | inspire_dojson/hep/rules/bd0xx.py | 1 | 12752 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""DoJSON rules for MARC fields in 0xx."""
from __future__ import absolute_import, division, print_function
import re
from collections import defaultdict
import pycountry
from dojson import utils
from idutils import is_arxiv_post_2007, is_doi, is_handle, normalize_doi
from inspire_schemas.api import load_schema
from inspire_schemas.utils import normalize_arxiv_category
from inspire_utils.helpers import force_list
from ..model import hep, hep2marc
from ...utils import force_single_element, normalize_isbn
RE_LANGUAGE = re.compile(r'\/| or | and |,|=|\s+')
@hep.over('isbns', '^020..')
@utils.for_each_value
def isbns(self, key, value):
"""Populate the ``isbns`` key."""
def _get_medium(value):
def _normalize(medium):
schema = load_schema('hep')
valid_media = schema['properties']['isbns']['items']['properties']['medium']['enum']
medium = medium.lower().replace('-', '').replace(' ', '')
if medium in valid_media:
return medium
elif medium == 'ebook':
return 'online'
elif medium == 'paperback':
return 'softcover'
return ''
medium = force_single_element(value.get('b', ''))
normalized_medium = _normalize(medium)
return normalized_medium
def _get_isbn(value):
a_value = force_single_element(value.get('a', ''))
normalized_a_value = a_value.replace('.', '')
if normalized_a_value:
return normalize_isbn(normalized_a_value)
return {
'medium': _get_medium(value),
'value': _get_isbn(value),
}
@hep2marc.over('020', 'isbns')
@utils.for_each_value
def isbns2marc(self, key, value):
"""Populate the ``020`` MARC field."""
return {
'a': value.get('value'),
'b': value.get('medium'),
}
@hep.over('dois', '^0247.')
def dois(self, key, value):
"""Populate the ``dois`` key.
Also populates the ``persistent_identifiers`` key through side effects.
"""
def _get_first_non_curator_source(sources):
sources_without_curator = [el for el in sources if el.upper() != 'CURATOR']
return force_single_element(sources_without_curator)
def _get_material(value):
MATERIAL_MAP = {
'ebook': 'publication',
}
q_value = force_single_element(value.get('q', ''))
normalized_q_value = q_value.lower()
return MATERIAL_MAP.get(normalized_q_value, normalized_q_value)
def _is_doi(id_, type_):
return (not type_ or type_.upper() == 'DOI') and is_doi(id_)
def _is_handle(id_, type_):
return (not type_ or type_.upper() == 'HDL') and is_handle(id_)
dois = self.get('dois', [])
persistent_identifiers = self.get('persistent_identifiers', [])
values = force_list(value)
for value in values:
id_ = force_single_element(value.get('a', ''))
material = _get_material(value)
schema = force_single_element(value.get('2', ''))
sources = force_list(value.get('9'))
source = _get_first_non_curator_source(sources)
if _is_doi(id_, schema):
dois.append({
'material': material,
'source': source,
'value': normalize_doi(id_),
})
else:
schema = 'HDL' if _is_handle(id_, schema) else schema
persistent_identifiers.append({
'material': material,
'schema': schema,
'source': source,
'value': id_,
})
self['persistent_identifiers'] = persistent_identifiers
return dois
@hep2marc.over('0247', '^dois$')
@utils.for_each_value
def dois2marc(self, key, value):
"""Populate the ``0247`` MARC field."""
return {
'2': 'DOI',
'9': value.get('source'),
'a': value.get('value'),
'q': value.get('material'),
}
@hep2marc.over('0247', '^persistent_identifiers$')
@utils.for_each_value
def persistent_identifiers2marc(self, key, value):
"""Populate the ``0247`` MARC field."""
return {
'2': value.get('schema'),
'9': value.get('source'),
'a': value.get('value'),
'q': value.get('material'),
}
@hep.over('texkeys', '^035..')
def texkeys(self, key, value):
"""Populate the ``texkeys`` key.
Also populates the ``external_system_identifiers`` and ``_desy_bookkeeping`` keys through side effects.
"""
def _is_oai(id_, schema):
return id_.startswith('oai:')
def _is_desy(id_, schema):
return id_ and schema in ('DESY',)
def _is_texkey(id_, schema):
return id_ and schema in ('INSPIRETeX', 'SPIRESTeX')
texkeys = self.get('texkeys', [])
external_system_identifiers = self.get('external_system_identifiers', [])
_desy_bookkeeping = self.get('_desy_bookkeeping', [])
values = force_list(value)
for value in values:
ids = force_list(value.get('a', ''))
other_ids = force_list(value.get('z', ''))
schema = force_single_element(value.get('9', ''))
for id_ in ids:
id_ = id_.strip()
if not id_:
continue
if _is_texkey(id_, schema):
texkeys.insert(0, id_)
elif _is_oai(id_, schema):
continue # XXX: ignored.
elif _is_desy(id_, schema):
_desy_bookkeeping.append({'identifier': id_})
else:
external_system_identifiers.insert(0, {
'schema': schema,
'value': id_,
})
for id_ in other_ids:
id_ = id_.strip()
if not id_:
continue
if _is_texkey(id_, schema):
texkeys.append(id_)
elif _is_oai(id_, schema):
continue # XXX: ignored.
elif _is_desy(id_, schema):
_desy_bookkeeping.append({'identifier': id_})
else:
external_system_identifiers.append({
'schema': schema,
'value': id_,
})
self['external_system_identifiers'] = external_system_identifiers
self['_desy_bookkeeping'] = _desy_bookkeeping
return texkeys
@hep2marc.over('035', '^texkeys$')
def texkeys2marc(self, key, value):
"""Populate the ``035`` MARC field."""
result = []
values = force_list(value)
if values:
value = values[0]
result.append({
'9': 'INSPIRETeX',
'a': value,
})
for value in values[1:]:
result.append({
'9': 'INSPIRETeX',
'z': value,
})
return result
@hep2marc.over('035', '^external_system_identifiers$')
def external_system_identifiers2marc(self, key, value):
"""Populate the ``035`` MARC field.
Also populates the ``970`` MARC field through side effects and an extra
``id_dict`` dictionary that holds potentially duplicate IDs that are
post-processed in a filter.
"""
def _is_scheme_cernkey(id_, schema):
return schema == 'CERNKEY'
def _is_scheme_spires(id_, schema):
return schema == 'SPIRES'
result_035 = self.get('035', [])
id_dict = self.get('id_dict', defaultdict(list))
result_970 = self.get('970', [])
values = force_list(value)
for value in values:
id_ = value.get('value')
schema = value.get('schema')
if _is_scheme_spires(id_, schema):
result_970.append({
'a': id_,
})
elif _is_scheme_cernkey(id_, schema):
result_035.append({
'9': 'CERNKEY',
'z': id_,
})
else:
id_dict[schema].append(id_)
self['970'] = result_970
self['id_dict'] = id_dict
return result_035
@hep.over('arxiv_eprints', '^037..')
def arxiv_eprints(self, key, value):
"""Populate the ``arxiv_eprints`` key.
Also populates the ``report_numbers`` key through side effects.
"""
def _get_clean_arxiv_eprint(id_):
return id_.split(':')[-1]
def _is_arxiv_eprint(id_, source):
return source == 'arXiv'
def _is_hidden_report_number(other_id, source):
return other_id
def _get_clean_source(source):
if source == 'arXiv:reportnumber':
return 'arXiv'
return source
arxiv_eprints = self.get('arxiv_eprints', [])
report_numbers = self.get('report_numbers', [])
values = force_list(value)
for value in values:
id_ = force_single_element(value.get('a', ''))
other_id = force_single_element(value.get('z', ''))
categories = [normalize_arxiv_category(category) for category
in force_list(value.get('c'))]
source = force_single_element(value.get('9', ''))
if _is_arxiv_eprint(id_, source):
arxiv_eprints.append({
'categories': categories,
'value': _get_clean_arxiv_eprint(id_),
})
elif _is_hidden_report_number(other_id, source):
report_numbers.append({
'hidden': True,
'source': _get_clean_source(source),
'value': other_id,
})
else:
report_numbers.append({
'source': _get_clean_source(source),
'value': id_,
})
self['report_numbers'] = report_numbers
return arxiv_eprints
@hep2marc.over('037', '^arxiv_eprints$')
def arxiv_eprints2marc(self, key, values):
"""Populate the ``037`` MARC field.
Also populates the ``035`` and the ``65017`` MARC fields through side effects.
"""
result_037 = self.get('037', [])
result_035 = self.get('035', [])
result_65017 = self.get('65017', [])
for value in values:
arxiv_id = value.get('value')
arxiv_id = 'arXiv:' + arxiv_id if is_arxiv_post_2007(arxiv_id) else arxiv_id
result_037.append({
'9': 'arXiv',
'a': arxiv_id,
'c': force_single_element(value.get('categories')),
})
result_035.append({
'9': 'arXiv',
'a': 'oai:arXiv.org:' + value.get('value'),
})
categories = force_list(value.get('categories'))
for category in categories:
result_65017.append({
'2': 'arXiv',
'a': category,
})
self['65017'] = result_65017
self['035'] = result_035
return result_037
@hep2marc.over('037', '^report_numbers$')
@utils.for_each_value
def report_numbers2marc(self, key, value):
"""Populate the ``037`` MARC field."""
def _get_mangled_source(source):
if source == 'arXiv':
return 'arXiv:reportnumber'
return source
source = _get_mangled_source(value.get('source'))
if value.get('hidden'):
return {
'9': source,
'z': value.get('value'),
}
return {
'9': source,
'a': value.get('value'),
}
@hep.over('languages', '^041..')
def languages(self, key, value):
"""Populate the ``languages`` key."""
languages = self.get('languages', [])
values = force_list(value.get('a'))
for value in values:
for language in RE_LANGUAGE.split(value):
try:
name = language.strip().capitalize()
languages.append(pycountry.languages.get(name=name).alpha_2)
except KeyError:
pass
return languages
@hep2marc.over('041', '^languages$')
@utils.for_each_value
def languages2marc(self, key, value):
"""Populate the ``041`` MARC field."""
return {'a': pycountry.languages.get(alpha_2=value).name.lower()}
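# Illustrative example of the ``isbns`` rule above (values are made up):
# a MARC ``020`` field such as {'a': '978-3-16-148410-0', 'b': 'Online'}
# becomes {'medium': 'online', 'value': normalize_isbn('978-3-16-148410-0')},
# and ``isbns2marc`` maps the same structure back to MARC on export.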
| gpl-3.0 | -6,248,574,016,785,603,000 | 28.587007 | 107 | 0.559912 | false | 3.674928 | false | false | false |
scopenco/netblock-tools | netnull.py | 1 | 3572 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Author: Andrey Skopenko <[email protected]>
'''A tool to create ip route rules that blackhole networks by country code
(ex: RU, CN, etc.). To run correctly, the script needs the GeoIP database
and the country code list to be downloaded first.'''
import csv
import sys
import optparse
import os.path
ROUTE_BIN = 'ip route'
MAXMIND_DB = \
'http://www.maxmind.com/download/geoip/database/GeoIPCountryCSV.zip'
COUTRY_DB = 'http://www.iso.org/iso/list-en1-semic-3.txt'
def main():
p = optparse.OptionParser(description=__doc__,
prog="netblock",
version="0.1",
usage="%prog [-cc] [-c] [-i] [-p] "
"[-d] [-a] country1 coutry2 ...")
p.add_option("--geoipdb",
help='Path to GeoIPCountryWhois.csv with GeoIP data',
default='GeoIPCountryWhois.csv')
p.add_option("--countrydb",
help='Path to country_names_and_code_elements_txt '
'with country codes',
default='country_names_and_code_elements_txt')
p.add_option("--cc",
action='store_true',
help='List of country codes')
p.add_option("--remove_nullroute", "-r",
help='Generate rules to remove subnets from ip route table',
action="store_true")
options, arguments = p.parse_args()
# show list of country codes
if options.cc:
if not os.path.isfile(options.countrydb):
print '%s not found! try command "wget %s"' % (
options.countrydb, COUTRY_DB)
sys.exit()
with open(options.countrydb) as f:
for line in f:
if line == "" or line.startswith("Country ") or \
";" not in line:
continue
c_name, c_code = line.strip().split(";")
c_name = ' '.join([part.capitalize() for part in
c_name.split(" ")])
print '%s\t%s' % (c_code, c_name)
return
# show help
if not arguments:
p.print_help()
sys.exit()
if not os.path.isfile(options.geoipdb):
print '%s not found! try ' \
'command "wget %s && unzip GeoIPCountryCSV.zip"' % (
options.geoipdb, MAXMIND_DB)
sys.exit()
# construct route rule tempate
base_rule = ROUTE_BIN
if options.remove_nullroute:
block_rule = base_rule + ' del blackhole %s'
else:
block_rule = base_rule + ' add blackhole %s'
# get country networks and show iptables rules
with open(options.geoipdb, 'rb') as f:
for i in csv.reader(f):
if i[4] in arguments:
network = int(i[2])
mask = int(i[3])
while (network <= mask):
x = 0
while True:
if network & (1 << x) == 0 and \
network + ((1 << (x + 1)) - 1) <= mask:
x += 1
continue
print block_rule % '%s/%s' % (get_net(network), 32 - x)
break
network += 1 << x
def get_net(network):
'''convert bin network to decimal'''
out = str(network & 255)
for x in range(3):
network = network >> 8
out = '%s.%s' % (str(network & 255), out)
return out
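# Worked example of the conversion above:
# get_net(3232235520) == '192.168.0.0'  (3232235520 == 0xC0A80000)
# For a GeoIP row covering 3232235520..3232235775 (exactly a /24), the loop
# in main() emits a single "... add blackhole 192.168.0.0/24" rule.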
if __name__ == "__main__":
main()
| bsd-3-clause | 179,938,823,875,172,540 | 34.019608 | 79 | 0.493001 | false | 3.951327 | false | false | false |
ju1ius/clisnips | clisnips/tui/widgets/progress/process.py | 1 | 2218 | import multiprocessing
import os
import signal
from clisnips.tui.logging import logger
from .message_queue import MessageQueue
class Process(multiprocessing.Process):
def __init__(self, message_queue: MessageQueue, target, args=(), kwargs=None):
super().__init__(target=target, args=args, kwargs=kwargs or {})
self._stop_event = multiprocessing.Event()
self._message_queue = message_queue
def stop(self):
logger.debug('Stopping process %s', self.pid)
self._stop_event.set()
# allow garbage collection
if self._message_queue:
self._message_queue = None
self._target.message_queue = None
def kill(self):
self.stop()
if self.is_alive():
logger.debug('Killing process %s', self.pid)
try:
os.killpg(self.pid, signal.SIGKILL)
except OSError as err:
os.kill(self.pid, signal.SIGKILL)
def run(self):
logger.debug('Starting process %s', self.pid)
# pass the queue object to the function object
self._target.message_queue = self._message_queue
self._message_queue.start()
self._message_queue.progress(0.0)
try:
self._do_run_task()
except KeyboardInterrupt as e:
logger.debug('Process %s caught KeyboardInterrupt', self.pid)
self._message_queue.cancel()
except Exception as err:
msg = ' '.join(err.args) if len(err.args) else str(err)
self._message_queue.error(msg)
finally:
self._message_queue.finish()
self._message_queue.close()
def _do_run_task(self):
for msg in self._target(*self._args, **self._kwargs):
if isinstance(msg, float):
self._message_queue.progress(msg)
elif isinstance(msg, str):
self._message_queue.message(msg)
if self._stop_event.is_set():
self._message_queue.cancel()
logger.debug('Cancelled process %s', self.pid)
break
class BlockingProcess(Process):
def _do_run_task(self):
self._target(*self._args, **self._kwargs)
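# Sketch of the target contract implied by _do_run_task above: for Process,
# the target is a generator whose yielded floats become progress updates and
# yielded strings become messages; BlockingProcess calls the target once.
# (example_task below is illustrative, not part of the package)
#
# def example_task(paths):
#     total = len(paths)
#     for i, path in enumerate(paths, 1):
#         yield 'processing %s' % path
#         yield i / total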
| gpl-3.0 | 8,363,412,266,626,529,000 | 32.606061 | 82 | 0.584761 | false | 4.099815 | false | false | false |
sanyaade-mobiledev/renderer-service-upnp | test/rendererconsole.py | 1 | 5827 | # -*- coding: utf-8 -*-
# renderer-console
#
# Copyright (C) 2012 Intel Corporation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# Sébastien Bianti <[email protected]>
#
import dbus
import json
import xml.etree.ElementTree as ET
ROOT_OBJECT_PATH = '/com/intel/RendererServiceUPnP'
RENDERER_BUS = 'com.intel.renderer-service-upnp'
PROPS_IF_NAME = 'org.freedesktop.DBus.Properties'
INTROSPECTABLE_IF_NAME = 'org.freedesktop.DBus.Introspectable'
DEVICE_IF_NAME = 'com.intel.UPnP.RendererDevice'
PUSH_HOST_IF_NAME = 'com.intel.RendererServiceUPnP.PushHost'
MANAGER_INTERFACE = 'com.intel.RendererServiceUPnP.Manager'
MEDIAPLAYER2_IF_NAME = 'org.mpris.MediaPlayer2'
PLAYER_IF_NAME = 'org.mpris.MediaPlayer2.Player'
global bus_type
bus_type = dbus.SessionBus()
def print_json(props):
print json.dumps(props, indent=4, sort_keys=True)
def get_interface(path, if_name):
return dbus.Interface(bus_type.get_object(RENDERER_BUS, path), if_name)
class Renderer(object):
"Represent a renderer service"
def __init__(self, object_path):
self.__path = object_path
self.__propsIF = get_interface(object_path, PROPS_IF_NAME)
self.__playerIF = get_interface(object_path, PLAYER_IF_NAME)
self.__pushhostIF = get_interface(object_path, PUSH_HOST_IF_NAME)
def get_interfaces(self):
try:
introspectable_IF = get_interface(self.__path,
INTROSPECTABLE_IF_NAME)
except:
print(u"Failed to retrieve introspectable interface")
introspection = introspectable_IF.Introspect()
tree = ET.fromstring(introspection)
return [i.attrib['name'] for i in tree if i.tag == "interface"]
def interfaces(self):
for i in self.get_interfaces():
print i
def get_prop(self, prop_name, inner_if_name = ""):
return self.__propsIF.Get(inner_if_name, prop_name)
def get_props(self, inner_if_name = ""):
return self.__propsIF.GetAll(inner_if_name)
def print_props(self, inner_if_name = ""):
print_json(self.get_props(inner_if_name))
def set_prop(self, prop_name, if_name, val):
"""
Sets only the following properties :
Rate and Volume
"""
return self.__propsIF.Set(if_name, prop_name, val)
# Control methods
def play(self):
self.__playerIF.Play()
def pause(self):
self.__playerIF.Pause()
def play_pause(self):
self.__playerIF.PlayPause()
def next(self):
self.__playerIF.Next()
def open_uri(self, uri):
self.__playerIF.OpenUri(uri)
def previous(self):
self.__playerIF.Previous()
def seek(self, offset):
self.__playerIF.Seek(offset)
def goto_track(self, trackID):
self.__playerIF.GotoTrack(trackID)
def set_position(self, trackID, position):
self.__playerIF.setPosition(trackID, position)
def stop(self):
self.__playerIF.Stop()
# Push Host methods
def host_file(self, path):
return self.__pushhostIF.HostFile(path)
def remove_file(self, path):
self.__pushhostIF.RemoveFile(path)
class Manager(object):
"""
High level class for detecting Renderers and doing common operations
on RendererServiceUPnP
"""
def __init__(self):
self.__manager = get_interface(ROOT_OBJECT_PATH, MANAGER_INTERFACE)
self.__renderers = []
def update_renderers(self):
self.__renderers = self.__manager.GetServers()
def get_renderers(self):
self.update_renderers()
return self.__renderers
def renderers(self):
self.update_renderers()
for path in self.__renderers:
try:
renderer = Renderer(path)
renderer_name = renderer.get_prop("Identity")
print(u"%s : %s" % (path, renderer_name))
except:
print(u"Failed to retrieve Identity for interface %s" % path)
def get_version(self):
return self.__manager.GetVersion()
def version(self):
print self.get_version()
def release(self):
self.__manager.Release()
if __name__ == "__main__":
print("\n\t\t\tExample for using rendererconsole:")
print("\t\t\t¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\n")
manager = Manager()
print("Version = %s" % manager.get_version())
print("¯¯¯¯¯¯¯")
print "\nRenderer's list:"
print("¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯")
manager.renderers()
renderer_list = manager.get_renderers()
for name in renderer_list:
renderer = Renderer(name)
interface_list = renderer.get_interfaces()
print("\nInterfaces of %s:" % name)
print("¯¯¯¯¯¯¯¯¯¯¯¯¯¯" + "¯" * len(name))
for i in interface_list:
print i
if_name = DEVICE_IF_NAME
if (if_name in interface_list) :
print("\nProperties of %s on %s:" % (if_name, name))
print("¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯" + (len(name) + len(if_name)) * "¯")
renderer.print_props(if_name)
| lgpl-2.1 | -1,308,506,136,399,219,500 | 28.420513 | 79 | 0.62402 | false | 3.300921 | false | false | false |
fresskarma/tinyos-1.x | tools/python/pytos/util/RoutingMessages.py | 1 | 7725 | # "Copyright (c) 2000-2003 The Regents of the University of California.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement
# is hereby granted, provided that the above copyright notice, the following
# two paragraphs and the author appear in all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY
# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
#
# @author Kamin Whitehouse
#
"""\
RoutingMessages: This is a package of classes that have some functionality
commonly used by routing messages, eg. Rpc and RamSymbol.
This class is not intended to be used on its own.
"""
import sys, string, time, types
import pytos.util.nescDecls as nescDecls
import pytos.Comm as Comm
import pytos.tools.Drip as Drip
import pytos.tools.Drain as Drain
from copy import deepcopy
class RoutingMessage( nescDecls.TosMsg ) :
def __init__(self, parent, amType, *structArgs) :
#store the parent
self.parent = parent
#initialize the default call parameters to none (ie, use the parent's defaults)
for (callParam,default) in self.parent.defaultCallParams :
self.__dict__[callParam] = None
nescDecls.TosMsg.__init__(self, parent.app.enums.AM_RPCCOMMANDMSG, *structArgs)
def _assignParam(self, field, param, paramId) :
"""assign a call parameter to the correct field (checking types)"""
if type(field) == nescDecls.nescType and (
type(param) == int or type(param) == long or
type(param) == float or type(param) == str or
type(param) == unicode ) :
field.value = param
elif type(field) == type(param) :
field = param
else :
raise Exception("Illegal parameter type for param #%s. Requires type %s." % (
str(paramId), str(type(field))) )
def _send(self, address, *posArgs, **nameArgs) :
commArgs = ()
#posArgs and nameArgs now contain only field values.
#now assign them to the appropriate RoutingMessage fields.
#create a temporary RoutingMessage to hold the call-time parameters
thisCall = deepcopy(self)
for i in range(len(posArgs)) :
thisCall._assignParam(thisCall.value[thisCall.fields[i+1]["name"]], posArgs[i], i)
for key in nameArgs.keys() :
if not thisCall.value.has_key(key) :
raise Exception("parameter name %s non-existent" % key)
thisCall._assignParam(thisCall.value[key], nameArgs[key], key)
thisCall.parent.sendComm.send(address, thisCall, *commArgs)
def parseCallParams(self, nameArgs) :
callParams = self.getCallParams()
#parse any call-time call parameters
for param in nameArgs.keys() :
if callParams.has_key(param) :
callParams[param] = nameArgs[param]
del nameArgs[param]
return callParams
def getCallParams(self) :
"""Use the default call parameters from the parent module, but if I have the same
field with a non-None value, use it instead"""
callParams = self.parent.getCallParams()
for param in callParams.keys() :
if self.__dict__.has_key(param) and self.__getattribute__(param) != None :
callParams[param] = self.__getattribute__(param)
return callParams
def __repr__(self) :
"""full function name"""
return "%s object at %s:\n\n%s" % (self.__class__, hex(id(self)), str(self))
def register(self, listener, comm=()) :
self.parent.receiveComm.register(self, listener, *comm)
def unregister(self, listener, comm=()) :
self.parent.receiveComm.unregister(self, listener, *comm)
class Shortcut (object):
"""used to allow multiple levels of indirection w/routing messages using dots;
ie., to allow something.module.interface.RoutingMessage()"""
def __init__(self, parent, name):
self.parent = parent
self.name = name
def __getattr__(self, name) :
name = self.name + "." + name
if self.parent._messages.has_key(name) :
return self.parent._messages.get(name)
else :
for message in self.parent._messages.values() :
if message.nescType.find(name+".") == 0 :
return Shortcut(self.parent,name)
raise Exception("Cannot find %s. Check spelling." % name)
def __repr__(self):
return "%s object at %s:\n\n%s" % (self.__class__, hex(id(self)), str(self))
def __str__(self):
string = ""
funcs = ()
messageNames = self.parent._messages.keys()
messageNames.sort()
for message in messageNames :
if message.find(self.name) == 0 :
string += str(self.parent._messages[message])
string = string.replace(self.name + "." , "" )
return string
class RoutingMessages(object) :
def __init__(self, app) :
self.app = app
self._messages = {}
## In this constructor, we connect to the routing layer as best as
## we can. This may mean creating new drip/drain instances,
## reusing old ones, reusing old Comm objects, or not connecting
## at all, depending...
if app.motecom == None:
return
#connect to sendComm: use localComm if user requested or if drip not compiled in.
self.address=app.enums.TOS_BCAST_ADDR
if app.localCommOnly==True or "AM_DRIPMSG" not in app.enums._enums:
self.sendComm = Comm.getCommObject(app, app.motecom)
else :
self.sendComm = Drip.getDripObject(app, app.motecom, app.enums.AM_RPCCOMMANDMSG)[0]
#connect to receiveComm: always use Drain unless not compiled in
if "AM_DRAINMSG" not in app.enums._enums:
self.receiveComm = Comm.getCommObject(app, app.motecom)
self.returnAddress = app.enums.TOS_BCAST_ADDR
else :
treeID = 0xfffe #can we set this automatically?
self.receiveComm = Drain.getDrainObject(app, app.motecom, treeID)[0]
if app.localCommOnly == False :
self.receiveComm.maintainTree()
if app.tosbase==True: #can we discover this like deluge?
self.returnAddress = treeID
else :
self.returnAddress = app.enums.TOS_UART_ADDR
def initializeCallParams(self, callParams) :
for (callParam,defaultVal) in self.defaultCallParams :
if callParams.has_key(callParam) :
self.__dict__[callParam] = callParams[callParam]
elif not self.__dict__.has_key(callParam):
self.__dict__[callParam] = defaultVal
def getCallParams(self) :
callParams = {}
for (callParam,default) in self.defaultCallParams :
callParams[callParam] = self.__dict__[callParam]
return callParams
def __getattr__(self, name) :
for function in self._messages.values() :
if function.nescType.find(name + ".") == 0 :
return Shortcut(self,name)
raise AttributeError("No such attribute %s" % name)
def __repr__(self) :
return "%s object at %s:\n\n%s" % (self.__class__, hex(id(self)), str(self))
def __str__(self) :
""" Print all available RoutingMessages."""
string = ""
keys = self._messages.keys()
keys.sort()
for name in keys :
string += str( self._messages[name])
return string
| bsd-3-clause | 4,245,047,028,988,728,300 | 36.139423 | 95 | 0.667961 | false | 3.640434 | false | false | false |
newsages/nQTrucks | ntrain/puertoct/truck2/prep.py | 1 | 5615 | #!/usr/bin/python
import os
from PIL import Image
import uuid
import shutil
import sys
WIDTH=52
HEIGHT=13
COUNTRY='trucks2'
#WIDTH=52
#HEIGHT=13
#COUNTRY='eu'
#constants
OPENCV_DIR= '/usr/bin'
SAMPLE_CREATOR = OPENCV_DIR + '/opencv_createsamples'
BASE_DIR = './'
OUTPUT_DIR = BASE_DIR + "out/"
INPUT_NEGATIVE_DIR = BASE_DIR + 'neg/'
INPUT_POSITIVE_DIR = BASE_DIR + COUNTRY + '/'
OUTPUT_NEGATIVE_DIR = BASE_DIR + 'negative/'
OUTPUT_POSITIVE_DIR = BASE_DIR + 'positive/'
POSITIVE_INFO_FILE = OUTPUT_POSITIVE_DIR + 'positive.txt'
NEGATIVE_INFO_FILE = OUTPUT_NEGATIVE_DIR + 'negative.txt'
VEC_FILE = OUTPUT_POSITIVE_DIR + 'vecfile.vec'
vector_arg = '-vec %s' % (VEC_FILE)
width_height_arg = '-w %d -h %d' % (WIDTH, HEIGHT)
def print_usage():
print "Usage: prep.py [Operation]"
print " -- Operations --"
print " neg -- Prepares the negative samples list"
print " pos -- Copies all the raw positive files to a opencv vector"
print " showpos -- Shows the positive samples that were created"
print " train -- Outputs the command for the Cascade Training algorithm"
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
command=""
if command != "":
pass
elif len(sys.argv) != 2:
print_usage()
exit()
else:
command = sys.argv[1]
if command == "neg":
print "Neg"
# Get rid of any spaces
for neg_file in os.listdir(INPUT_NEGATIVE_DIR):
if " " in neg_file:
fileName, fileExtension = os.path.splitext(neg_file)
newfilename = str(uuid.uuid4()) + fileExtension
#print "renaming: " + files + " to "+ root_dir + "/" + str(uuid.uuid4()) + fileExtension
os.rename(INPUT_NEGATIVE_DIR + neg_file, INPUT_NEGATIVE_DIR + newfilename)
f = open(NEGATIVE_INFO_FILE,'w')
## Write a list of all the negative files
for neg_file in os.listdir(INPUT_NEGATIVE_DIR):
if os.path.isdir(INPUT_NEGATIVE_DIR + neg_file):
continue
shutil.copy2(INPUT_NEGATIVE_DIR + neg_file, OUTPUT_NEGATIVE_DIR + neg_file )
f.write(neg_file + "\n")
f.close()
elif command == "pos":
print "Pos"
info_arg = '-info %s' % (POSITIVE_INFO_FILE)
# Copy all files in the raw directory and build an info file
## Remove all files in the output positive directory
for old_file in os.listdir(OUTPUT_POSITIVE_DIR):
os.unlink(OUTPUT_POSITIVE_DIR + old_file)
## First, prep the sample filenames (make sure they have no spaces)
for files in os.listdir(INPUT_POSITIVE_DIR):
if os.path.isdir(INPUT_POSITIVE_DIR + files):
continue
# Rename the file if it has a space in it
newfilename = files
if " " in files:
fileName, fileExtension = os.path.splitext(files)
newfilename = str(uuid.uuid4()) + fileExtension
#print "renaming: " + files + " to "+ root_dir + "/" + str(uuid.uuid4()) + fileExtension
os.rename(INPUT_POSITIVE_DIR + files, INPUT_POSITIVE_DIR + newfilename)
# Copy from the raw directory to the positive directory
shutil.copy2(INPUT_POSITIVE_DIR + newfilename, OUTPUT_POSITIVE_DIR + newfilename )
total_pics = 0
## Create the positive.txt input file
f = open(POSITIVE_INFO_FILE,'w')
for filename in os.listdir(OUTPUT_POSITIVE_DIR):
if os.path.isdir(OUTPUT_POSITIVE_DIR + filename):
continue
if filename.endswith(".txt"):
continue
try:
img = Image.open(OUTPUT_POSITIVE_DIR + filename)
# get the image's width and height in pixels
width, height = img.size
f.write(filename + " 1 0 0 " + str(width) + " " + str(height) + '\n')
total_pics = total_pics + 1
except IOError:
print "Exception reading image file: " + filename
f.close()
# Collapse the samples into a vector file
execStr = '%s/opencv_createsamples %s %s %s -num %d' % (OPENCV_DIR, vector_arg, width_height_arg, info_arg, total_pics )
print execStr
os.system(execStr)
#opencv_createsamples -info ./positive.txt -vec ../positive/vecfile.vec -w 120 -h 60 -bg ../negative/PentagonCityParkingGarage21.jpg -num 100
elif command == "showpos":
print "SHOW"
execStr = '%s/opencv_createsamples -vec %s -w %d -h %d' % (OPENCV_DIR, VEC_FILE, WIDTH, HEIGHT )
print execStr
os.system(execStr)
#opencv_createsamples -vec ../positive/vecfile.vec -w 120 -h 60
elif command == "train":
print "TRAIN"
data_arg = '-data %s/' % (OUTPUT_DIR)
bg_arg = '-bg %s' % (NEGATIVE_INFO_FILE)
try:
num_pos_samples = file_len(POSITIVE_INFO_FILE)
except:
num_pos_samples = -1
num_neg_samples = file_len(NEGATIVE_INFO_FILE)
execStr = '%s/opencv_traincascade %s %s %s %s -numPos %d -numNeg %d -maxFalseAlarmRate 0.45 -featureType LBP -numStages 13' % (OPENCV_DIR, data_arg, vector_arg, bg_arg, width_height_arg, num_pos_samples, num_neg_samples )
print "Execute the following command to start training:"
print execStr
#opencv_traincascade -data ./out/ -vec ./positive/vecfile.vec -bg ./negative/negative.txt -w 120 -h 60 -numPos 99 -numNeg 5 -featureType LBP -numStages 8
#opencv_traincascade -data ./out/ -vec ./positive/vecfile.vec -bg ./negative/negative.txt -w 120 -h 60 -numPos 99 -numNeg 5 -featureType LBP -numStages 20
elif command == "SDFLSDFSDFSDF":
root_dir = '/home/mhill/projects/anpr/AlprPlus/samples/svm/raw-pos'
outputfilename = "positive.txt"
else:
print_usage()
exit()
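# Typical workflow with this script: run "prep.py neg" to build negative.txt,
# then "prep.py pos" to copy the raw positives and build the .vec file,
# optionally "prep.py showpos" to inspect the generated samples, and finally
# "prep.py train" to print the opencv_traincascade command to execute.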
| gpl-3.0 | -3,035,221,647,513,750,500 | 28.708995 | 225 | 0.636331 | false | 3.201254 | false | false | false |
marnnie/Cable-buenaventura | plugin.video.genesis/resources/lib/sources/mvsnap_mv_tv.py | 1 | 4444 | # -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse,json
from resources.lib.libraries import client
from resources.lib import resolvers
class source:
def __init__(self):
self.base_link = 'http://mvsnap.com'
self.search_link = '/v1/api/search?query=%s'
def get_movie(self, imdb, title, year):
try:
query = self.search_link % imdb
query = urlparse.urljoin(self.base_link, query)
result = client.source(query)
result = json.loads(result)
result = result['movies'][0]['slug']
url = '/movies/%s' % result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_show(self, imdb, tvdb, tvshowtitle, year):
try:
url = '%s (%s)' % (tvshowtitle, year)
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
try:
query = self.search_link % imdb
query = urlparse.urljoin(self.base_link, query)
result = client.source(query)
result = json.loads(result)
result = result['movies']
season = '%02d' % int(season)
episode = '%02d' % int(episode)
result = [(i['slug'], i['long_title']) for i in result]
result = [(i[0], re.compile('(\d*)$').findall(i[1])) for i in result]
result = [(i[0], i[1][0]) for i in result if len(i[1]) > 0]
result = [i[0] for i in result if season == i[1]][0]
url = '/tv-shows/%s?S%sE%s' % (result, season, episode)
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_sources(self, url, hosthdDict, hostDict, locDict):
try:
sources = []
if url == None: return sources
query = urlparse.urlparse(url).query
try: query = '%02d' % int(re.compile('E(\d*)$').findall(query)[0])
except: query = ''
url = urlparse.urljoin(self.base_link, url)
result = client.source(url)
result = client.parseDOM(result, 'select', attrs = {'id': 'myDropdown'})[0]
result = zip(client.parseDOM(result, 'option', ret='value'), client.parseDOM(result, 'option'))
result = [i[0] for i in result if i[1].endswith(query) or query == ''][0]
direct = re.compile('(.+)[|](.+?)[,]').findall(result)
if len(direct) > 0:
quality = 'HD' if 'hd' in direct[0][0].lower() else 'SD'
sources.append({'source': 'GVideo', 'quality': quality, 'provider': 'MVsnap', 'url': direct[0][1]})
return sources
url = urlparse.urljoin(self.base_link, result)
url = client.source(url, output='geturl')
if not 'google' in url: raise Exception()
url = url.split('get_video_info')[0]
url = resolvers.request(url)
for i in url: sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'MVsnap', 'url': i['url']})
return sources
except:
return sources
def resolve(self, url):
try:
if url.startswith('stack://'): return url
url = client.request(url, output='geturl')
if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
else: url = url.replace('https://', 'http://')
return url
except:
return
| gpl-2.0 | 2,883,124,037,299,441,000 | 31.437956 | 126 | 0.54928 | false | 3.932743 | false | false | false |
metabrainz/botbot-web | botbot/apps/plugins/core/logger.py | 1 | 2002 | import re
from botbot.apps.logs.models import Log
from botbot_plugins.base import BasePlugin
import botbot_plugins.config as config
class Config(config.BaseConfig):
ignore_prefixes = config.Field(
default=["!-"],
required=False,
help_text="""
Specify a list of regular expressions which match
the start of messages to be ignored (excluded from the logs)
"""
)
def should_ignore_text(text, ignore_prefixes):
return any(
(
prefix and
re.match(prefix, text, flags=re.IGNORECASE) is not None
)
for prefix in ignore_prefixes
)
class Plugin(BasePlugin):
"""
Logs all activity.
I keep extensive logs on all the activity in `{{ channel.name }}`.
You can read and search them at {{ SITE }}{{ channel.get_absolute_url }}.
"""
config_class = Config
def logit(self, line):
"""Log a message to the database"""
# If the channel does not start with "#" that means the message
# is part of a /query
if line._channel_name.startswith("#"):
ignore_prefixes = self.config['ignore_prefixes']
if ignore_prefixes:
if not isinstance(ignore_prefixes, list):
ignore_prefixes = [ignore_prefixes]
else:
ignore_prefixes = []
# Delete ACTION prefix created by /me
text = line.text
if text.startswith("ACTION "):
text = text[7:]
if not should_ignore_text(text, ignore_prefixes):
Log.objects.create(
channel_id=line._channel.pk,
timestamp=line._received,
nick=line.user,
text=line.full_text,
room=line._channel,
host=line._host,
command=line._command,
raw=line._raw)
logit.route_rule = ('firehose', ur'(.*)')
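# Examples of the prefix matching above (each configured prefix is a regex
# anchored at the start of the message, matched case-insensitively):
# should_ignore_text("!-not for the logs", ["!-"]) -> True
# should_ignore_text("regular message", ["!-"]) -> False
# should_ignore_text("anything", []) -> False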
| mit | 801,682,193,017,813,800 | 28.880597 | 77 | 0.545455 | false | 4.46875 | true | false | false |
evanmiltenburg/Dutch-corpora | overheid/scripts/make_xml_plain.py | 1 | 1350 | from bs4 import BeautifulSoup
import nltk.data
from nltk.tokenize import word_tokenize
import glob
import gzip
import sys
tokenizer = nltk.data.load('tokenizers/punkt/dutch.pickle')
def good_sentence(s):
if len(s) < 4 or s.count(',') > 4:
return False
else:
digits = filter(lambda x:x.isdigit(),s)
if len(digits) > (float(len(s))/2):
return False
else:
return True
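# Examples of the heuristic above (sentences are token lists from word_tokenize):
# good_sentence(['Dit', 'is', 'een', 'zin', '.']) -> True
# good_sentence(['1', '2', '3', '4', '5']) -> False (mostly digits)
# good_sentence(['te', 'kort']) -> False (fewer than 4 tokens)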
def sentences_for_file(filename):
with open(filename) as f:
soup = BeautifulSoup(f)
pars = filter(lambda p: not p == None,
map(lambda x:x.get_text(), soup.find_all('al')))
sentences = [word_tokenize(sentence) for x in pars
for sentence in tokenizer.tokenize(x)]
return [' '.join(s).encode('utf-8') for s in filter(good_sentence, sentences)]
def main(ftype):
with gzip.open('../corpus/' + ftype + '_plain.txt.gz','w') as f:
for filename in glob.glob('../data/' + ftype + '/*/*.xml'):
f.write('\n'.join(sentences_for_file(filename)))
if __name__ == "__main__":
ftypes = {'kst', 'trb', 'stb', 'ag', 'ah', 'stcrt', 'kv', 'h', 'blg', 'nds'}
ftype = sys.argv[1]
if ftype in ftypes:
main(ftype)
else:
raise KeyError('No known folder of that type. (You entered: '+ftype + ')')
| apache-2.0 | 3,827,033,118,878,343,000 | 31.926829 | 86 | 0.568148 | false | 3.452685 | false | false | false |
datapythonista/pandas | pandas/core/arrays/sparse/accessor.py | 2 | 11479 | """Sparse accessor"""
import numpy as np
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import find_common_type
from pandas.core.accessor import (
PandasDelegate,
delegate_names,
)
from pandas.core.arrays.sparse.array import SparseArray
from pandas.core.arrays.sparse.dtype import SparseDtype
class BaseAccessor:
_validation_msg = "Can only use the '.sparse' accessor with Sparse data."
def __init__(self, data=None):
self._parent = data
self._validate(data)
def _validate(self, data):
raise NotImplementedError
@delegate_names(
SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"
)
class SparseAccessor(BaseAccessor, PandasDelegate):
"""
Accessor for Series with sparse values.
"""
def _validate(self, data):
if not isinstance(data.dtype, SparseDtype):
raise AttributeError(self._validation_msg)
def _delegate_property_get(self, name, *args, **kwargs):
return getattr(self._parent.array, name)
def _delegate_method(self, name, *args, **kwargs):
if name == "from_coo":
return self.from_coo(*args, **kwargs)
elif name == "to_coo":
return self.to_coo(*args, **kwargs)
else:
raise ValueError
@classmethod
def from_coo(cls, A, dense_index=False):
"""
Create a Series with sparse values from a scipy.sparse.coo_matrix.
Parameters
----------
A : scipy.sparse.coo_matrix
dense_index : bool, default False
If False (default), the SparseSeries index consists of only the
coords of the non-null entries of the original coo_matrix.
If True, the SparseSeries index consists of the full sorted
(row, col) coordinates of the coo_matrix.
Returns
-------
s : Series
A Series with sparse values.
Examples
--------
>>> from scipy import sparse
>>> A = sparse.coo_matrix(
... ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)
... )
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[0., 0., 1., 2.],
[3., 0., 0., 0.],
[0., 0., 0., 0.]])
>>> ss = pd.Series.sparse.from_coo(A)
>>> ss
0 2 1.0
3 2.0
1 0 3.0
dtype: Sparse[float64, nan]
"""
from pandas import Series
from pandas.core.arrays.sparse.scipy_sparse import coo_to_sparse_series
result = coo_to_sparse_series(A, dense_index=dense_index)
result = Series(result.array, index=result.index, copy=False)
return result
def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False):
"""
Create a scipy.sparse.coo_matrix from a Series with MultiIndex.
Use row_levels and column_levels to determine the row and column
coordinates respectively. row_levels and column_levels are the names
(labels) or numbers of the levels. {row_levels, column_levels} must be
a partition of the MultiIndex level names (or numbers).
Parameters
----------
row_levels : tuple/list
column_levels : tuple/list
sort_labels : bool, default False
Sort the row and column labels before forming the sparse matrix.
Returns
-------
y : scipy.sparse.coo_matrix
rows : list (row labels)
columns : list (column labels)
Examples
--------
>>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
>>> s.index = pd.MultiIndex.from_tuples(
... [
... (1, 2, "a", 0),
... (1, 2, "a", 1),
... (1, 1, "b", 0),
... (1, 1, "b", 1),
... (2, 1, "b", 0),
... (2, 1, "b", 1)
... ],
... names=["A", "B", "C", "D"],
... )
>>> s
A B C D
1 2 a 0 3.0
1 NaN
1 b 0 1.0
1 3.0
2 1 b 0 NaN
1 NaN
dtype: float64
>>> ss = s.astype("Sparse")
>>> ss
A B C D
1 2 a 0 3.0
1 NaN
1 b 0 1.0
1 3.0
2 1 b 0 NaN
1 NaN
dtype: Sparse[float64, nan]
>>> A, rows, columns = ss.sparse.to_coo(
... row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True
... )
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[0., 0., 1., 3.],
[3., 0., 0., 0.],
[0., 0., 0., 0.]])
>>> rows
[(1, 1), (1, 2), (2, 1)]
>>> columns
[('a', 0), ('a', 1), ('b', 0), ('b', 1)]
"""
from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo
A, rows, columns = sparse_series_to_coo(
self._parent, row_levels, column_levels, sort_labels=sort_labels
)
return A, rows, columns
def to_dense(self):
"""
Convert a Series from sparse values to dense.
.. versionadded:: 0.25.0
Returns
-------
Series:
A Series with the same values, stored as a dense array.
Examples
--------
>>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))
>>> series
0 0
1 1
2 0
dtype: Sparse[int64, 0]
>>> series.sparse.to_dense()
0 0
1 1
2 0
dtype: int64
"""
from pandas import Series
return Series(
self._parent.array.to_dense(),
index=self._parent.index,
name=self._parent.name,
)
class SparseFrameAccessor(BaseAccessor, PandasDelegate):
"""
DataFrame accessor for sparse data.
.. versionadded:: 0.25.0
"""
def _validate(self, data):
dtypes = data.dtypes
if not all(isinstance(t, SparseDtype) for t in dtypes):
raise AttributeError(self._validation_msg)
@classmethod
def from_spmatrix(cls, data, index=None, columns=None):
"""
Create a new DataFrame from a scipy sparse matrix.
.. versionadded:: 0.25.0
Parameters
----------
data : scipy.sparse.spmatrix
Must be convertible to csc format.
index, columns : Index, optional
Row and column labels to use for the resulting DataFrame.
Defaults to a RangeIndex.
Returns
-------
DataFrame
Each column of the DataFrame is stored as a
:class:`arrays.SparseArray`.
Examples
--------
>>> import scipy.sparse
>>> mat = scipy.sparse.eye(3)
>>> pd.DataFrame.sparse.from_spmatrix(mat)
0 1 2
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
from pandas._libs.sparse import IntIndex
from pandas import DataFrame
data = data.tocsc()
index, columns = cls._prep_index(data, index, columns)
n_rows, n_columns = data.shape
# We need to make sure indices are sorted, as we create
        # IntIndex with no input validation (i.e. check_integrity=False).

# Indices may already be sorted in scipy in which case this adds
# a small overhead.
data.sort_indices()
indices = data.indices
indptr = data.indptr
array_data = data.data
dtype = SparseDtype(array_data.dtype, 0)
arrays = []
for i in range(n_columns):
sl = slice(indptr[i], indptr[i + 1])
idx = IntIndex(n_rows, indices[sl], check_integrity=False)
arr = SparseArray._simple_new(array_data[sl], idx, dtype)
arrays.append(arr)
return DataFrame._from_arrays(
arrays, columns=columns, index=index, verify_integrity=False
)
def to_dense(self):
"""
Convert a DataFrame with sparse values to dense.
.. versionadded:: 0.25.0
Returns
-------
DataFrame
A DataFrame with the same values stored as dense arrays.
Examples
--------
>>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0])})
>>> df.sparse.to_dense()
A
0 0
1 1
2 0
"""
from pandas import DataFrame
data = {k: v.array.to_dense() for k, v in self._parent.items()}
return DataFrame(data, index=self._parent.index, columns=self._parent.columns)
def to_coo(self):
"""
Return the contents of the frame as a sparse SciPy COO matrix.
.. versionadded:: 0.25.0
Returns
-------
coo_matrix : scipy.sparse.spmatrix
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
The dtype will be the lowest-common-denominator type (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
        float32. By numpy.find_common_type convention, mixing int64
        and uint64 will result in a float64 dtype.
"""
import_optional_dependency("scipy")
from scipy.sparse import coo_matrix
dtype = find_common_type(self._parent.dtypes.to_list())
if isinstance(dtype, SparseDtype):
dtype = dtype.subtype
cols, rows, data = [], [], []
for col, name in enumerate(self._parent):
s = self._parent[name]
row = s.array.sp_index.to_int_index().indices
cols.append(np.repeat(col, len(row)))
rows.append(row)
data.append(s.array.sp_values.astype(dtype, copy=False))
cols = np.concatenate(cols)
rows = np.concatenate(rows)
data = np.concatenate(data)
return coo_matrix((data, (rows, cols)), shape=self._parent.shape)
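    # Illustrative sketch (added comment, not part of the original pandas
    # source), assuming scipy is installed; the frame is a hypothetical example:
    #
    # >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0])})
    # >>> coo = df.sparse.to_coo()   # 3x1 scipy.sparse.coo_matrix, dtype int64
    # >>> coo.todense()
    # matrix([[0],
    #         [1],
    #         [0]])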
@property
def density(self) -> float:
"""
Ratio of non-sparse points to total (dense) data points.
"""
tmp = np.mean([column.array.density for _, column in self._parent.items()])
return tmp
@staticmethod
def _prep_index(data, index, columns):
from pandas.core.indexes.api import ensure_index
import pandas.core.indexes.base as ibase
N, K = data.shape
if index is None:
index = ibase.default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = ibase.default_index(K)
else:
columns = ensure_index(columns)
if len(columns) != K:
raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}")
if len(index) != N:
raise ValueError(f"Index length mismatch: {len(index)} vs. {N}")
return index, columns
| bsd-3-clause | -6,695,402,877,092,622,000 | 29.287599 | 86 | 0.531492 | false | 3.959641 | false | false | false |
alanmcruickshank/superset-dev | superset/connectors/druid/models.py | 1 | 45334 | # pylint: disable=invalid-unary-operand-type
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime, timedelta
import json
import logging
from multiprocessing import Pool
from dateutil.parser import parse as dparse
from flask import escape, Markup
from flask_appbuilder import Model
from flask_appbuilder.models.decorators import renders
from flask_babel import lazy_gettext as _
from pydruid.client import PyDruid
from pydruid.utils.aggregators import count
from pydruid.utils.filters import Bound, Dimension, Filter
from pydruid.utils.having import Aggregation
from pydruid.utils.postaggregator import (
Const, Field, HyperUniqueCardinality, Postaggregator, Quantile, Quantiles,
)
import requests
from six import string_types
import sqlalchemy as sa
from sqlalchemy import (
Boolean, Column, DateTime, ForeignKey, Integer, or_, String, Text,
)
from sqlalchemy.orm import backref, relationship
from superset import conf, db, import_util, sm, utils
from superset.connectors.base.models import BaseColumn, BaseDatasource, BaseMetric
from superset.models.helpers import AuditMixinNullable, QueryResult, set_perm
from superset.utils import (
DimSelector, DTTM_ALIAS, flasher, MetricPermException,
)
DRUID_TZ = conf.get('DRUID_TZ')
# Function wrapper because bound methods cannot
# be passed to processes
def _fetch_metadata_for(datasource):
return datasource.latest_metadata()
class JavascriptPostAggregator(Postaggregator):
def __init__(self, name, field_names, function):
self.post_aggregator = {
'type': 'javascript',
'fieldNames': field_names,
'name': name,
'function': function,
}
self.name = name
class CustomPostAggregator(Postaggregator):
"""A way to allow users to specify completely custom PostAggregators"""
def __init__(self, name, post_aggregator):
self.name = name
self.post_aggregator = post_aggregator
class DruidCluster(Model, AuditMixinNullable):
"""ORM object referencing the Druid clusters"""
__tablename__ = 'clusters'
type = 'druid'
id = Column(Integer, primary_key=True)
verbose_name = Column(String(250), unique=True)
# short unique name, used in permissions
cluster_name = Column(String(250), unique=True)
coordinator_host = Column(String(255))
coordinator_port = Column(Integer, default=8081)
coordinator_endpoint = Column(
String(255), default='druid/coordinator/v1/metadata')
broker_host = Column(String(255))
broker_port = Column(Integer, default=8082)
broker_endpoint = Column(String(255), default='druid/v2')
metadata_last_refreshed = Column(DateTime)
cache_timeout = Column(Integer)
def __repr__(self):
return self.verbose_name if self.verbose_name else self.cluster_name
def get_pydruid_client(self):
cli = PyDruid(
'http://{0}:{1}/'.format(self.broker_host, self.broker_port),
self.broker_endpoint)
return cli
def get_datasources(self):
endpoint = (
'http://{obj.coordinator_host}:{obj.coordinator_port}/'
'{obj.coordinator_endpoint}/datasources'
).format(obj=self)
return json.loads(requests.get(endpoint).text)
def get_druid_version(self):
endpoint = (
'http://{obj.coordinator_host}:{obj.coordinator_port}/status'
).format(obj=self)
return json.loads(requests.get(endpoint).text)['version']
def refresh_datasources(
self,
datasource_name=None,
merge_flag=True,
refreshAll=True):
"""Refresh metadata of all datasources in the cluster
If ``datasource_name`` is specified, only that datasource is updated
"""
self.druid_version = self.get_druid_version()
ds_list = self.get_datasources()
blacklist = conf.get('DRUID_DATA_SOURCE_BLACKLIST', [])
ds_refresh = []
if not datasource_name:
ds_refresh = list(filter(lambda ds: ds not in blacklist, ds_list))
elif datasource_name not in blacklist and datasource_name in ds_list:
ds_refresh.append(datasource_name)
else:
return
self.refresh_async(ds_refresh, merge_flag, refreshAll)
def refresh_async(self, datasource_names, merge_flag, refreshAll):
"""
        Fetches metadata for the specified datasources and
        merges them into the Superset database
"""
session = db.session
ds_list = (
session.query(DruidDatasource)
.filter(or_(DruidDatasource.datasource_name == name
for name in datasource_names))
)
ds_map = {ds.name: ds for ds in ds_list}
for ds_name in datasource_names:
datasource = ds_map.get(ds_name, None)
if not datasource:
datasource = DruidDatasource(datasource_name=ds_name)
with session.no_autoflush:
session.add(datasource)
flasher(
'Adding new datasource [{}]'.format(ds_name), 'success')
ds_map[ds_name] = datasource
elif refreshAll:
flasher(
'Refreshing datasource [{}]'.format(ds_name), 'info')
else:
del ds_map[ds_name]
continue
datasource.cluster = self
datasource.merge_flag = merge_flag
session.flush()
        # Prepare multithreaded execution
pool = Pool()
ds_refresh = list(ds_map.values())
metadata = pool.map(_fetch_metadata_for, ds_refresh)
pool.close()
pool.join()
for i in range(0, len(ds_refresh)):
datasource = ds_refresh[i]
cols = metadata[i]
col_objs_list = (
session.query(DruidColumn)
.filter(DruidColumn.datasource_name == datasource.datasource_name)
.filter(or_(DruidColumn.column_name == col for col in cols))
)
col_objs = {col.column_name: col for col in col_objs_list}
for col in cols:
if col == '__time': # skip the time column
continue
col_obj = col_objs.get(col, None)
if not col_obj:
col_obj = DruidColumn(
datasource_name=datasource.datasource_name,
column_name=col)
with session.no_autoflush:
session.add(col_obj)
datatype = cols[col]['type']
if datatype == 'STRING':
col_obj.groupby = True
col_obj.filterable = True
if datatype == 'hyperUnique' or datatype == 'thetaSketch':
col_obj.count_distinct = True
# Allow sum/min/max for long or double
if datatype == 'LONG' or datatype == 'DOUBLE':
col_obj.sum = True
col_obj.min = True
col_obj.max = True
col_obj.type = datatype
col_obj.datasource = datasource
datasource.generate_metrics_for(col_objs_list)
session.commit()
@property
def perm(self):
return '[{obj.cluster_name}].(id:{obj.id})'.format(obj=self)
def get_perm(self):
return self.perm
@property
def name(self):
return self.verbose_name if self.verbose_name else self.cluster_name
@property
def unique_name(self):
return self.verbose_name if self.verbose_name else self.cluster_name
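# Illustrative note (added comment, not part of the original source): for a
# cluster configured with the hypothetical values coordinator_host='localhost'
# and coordinator_port=8081 and the default coordinator_endpoint,
# get_datasources() issues a GET against
#   http://localhost:8081/druid/coordinator/v1/metadata/datasources
# and returns the parsed JSON list of datasource names.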
class DruidColumn(Model, BaseColumn):
"""ORM model for storing Druid datasource column metadata"""
__tablename__ = 'columns'
datasource_name = Column(
String(255),
ForeignKey('datasources.datasource_name'))
# Setting enable_typechecks=False disables polymorphic inheritance.
datasource = relationship(
'DruidDatasource',
backref=backref('columns', cascade='all, delete-orphan'),
enable_typechecks=False)
dimension_spec_json = Column(Text)
export_fields = (
'datasource_name', 'column_name', 'is_active', 'type', 'groupby',
'count_distinct', 'sum', 'avg', 'max', 'min', 'filterable',
'description', 'dimension_spec_json',
)
def __repr__(self):
return self.column_name
@property
def expression(self):
return self.dimension_spec_json
@property
def dimension_spec(self):
if self.dimension_spec_json:
return json.loads(self.dimension_spec_json)
def get_metrics(self):
metrics = {}
metrics['count'] = DruidMetric(
metric_name='count',
verbose_name='COUNT(*)',
metric_type='count',
json=json.dumps({'type': 'count', 'name': 'count'}),
)
# Somehow we need to reassign this for UDAFs
if self.type in ('DOUBLE', 'FLOAT'):
corrected_type = 'DOUBLE'
else:
corrected_type = self.type
if self.sum and self.is_num:
mt = corrected_type.lower() + 'Sum'
name = 'sum__' + self.column_name
metrics[name] = DruidMetric(
metric_name=name,
metric_type='sum',
verbose_name='SUM({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name}),
)
if self.avg and self.is_num:
mt = corrected_type.lower() + 'Avg'
name = 'avg__' + self.column_name
metrics[name] = DruidMetric(
metric_name=name,
metric_type='avg',
verbose_name='AVG({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name}),
)
if self.min and self.is_num:
mt = corrected_type.lower() + 'Min'
name = 'min__' + self.column_name
metrics[name] = DruidMetric(
metric_name=name,
metric_type='min',
verbose_name='MIN({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name}),
)
if self.max and self.is_num:
mt = corrected_type.lower() + 'Max'
name = 'max__' + self.column_name
metrics[name] = DruidMetric(
metric_name=name,
metric_type='max',
verbose_name='MAX({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name}),
)
if self.count_distinct:
name = 'count_distinct__' + self.column_name
if self.type == 'hyperUnique' or self.type == 'thetaSketch':
metrics[name] = DruidMetric(
metric_name=name,
verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
metric_type=self.type,
json=json.dumps({
'type': self.type,
'name': name,
'fieldName': self.column_name,
}),
)
else:
metrics[name] = DruidMetric(
metric_name=name,
verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
metric_type='count_distinct',
json=json.dumps({
'type': 'cardinality',
'name': name,
'fieldNames': [self.column_name]}),
)
return metrics
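    # Illustrative sketch (added comment, not part of the original source): for
    # a hypothetical LONG column named 'duration' with the sum flag set,
    # get_metrics() returns the always-present 'count' metric plus a
    # 'sum__duration' DruidMetric whose JSON is
    #   {"type": "longSum", "name": "sum__duration", "fieldName": "duration"}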
def generate_metrics(self):
"""Generate metrics based on the column metadata"""
metrics = self.get_metrics()
dbmetrics = (
db.session.query(DruidMetric)
.filter(DruidCluster.cluster_name == self.datasource.cluster_name)
.filter(DruidMetric.datasource_name == self.datasource_name)
.filter(or_(
DruidMetric.metric_name == m for m in metrics
))
)
dbmetrics = {metric.metric_name: metric for metric in dbmetrics}
for metric in metrics.values():
metric.datasource_name = self.datasource_name
if not dbmetrics.get(metric.metric_name, None):
db.session.add(metric)
@classmethod
def import_obj(cls, i_column):
def lookup_obj(lookup_column):
return db.session.query(DruidColumn).filter(
DruidColumn.datasource_name == lookup_column.datasource_name,
DruidColumn.column_name == lookup_column.column_name).first()
return import_util.import_simple_obj(db.session, i_column, lookup_obj)
class DruidMetric(Model, BaseMetric):
"""ORM object referencing Druid metrics for a datasource"""
__tablename__ = 'metrics'
datasource_name = Column(
String(255),
ForeignKey('datasources.datasource_name'))
# Setting enable_typechecks=False disables polymorphic inheritance.
datasource = relationship(
'DruidDatasource',
backref=backref('metrics', cascade='all, delete-orphan'),
enable_typechecks=False)
json = Column(Text)
export_fields = (
'metric_name', 'verbose_name', 'metric_type', 'datasource_name',
'json', 'description', 'is_restricted', 'd3format',
)
@property
def expression(self):
return self.json
@property
def json_obj(self):
try:
obj = json.loads(self.json)
except Exception:
obj = {}
return obj
@property
def perm(self):
return (
'{parent_name}.[{obj.metric_name}](id:{obj.id})'
).format(obj=self,
parent_name=self.datasource.full_name,
) if self.datasource else None
@classmethod
def import_obj(cls, i_metric):
def lookup_obj(lookup_metric):
return db.session.query(DruidMetric).filter(
DruidMetric.datasource_name == lookup_metric.datasource_name,
DruidMetric.metric_name == lookup_metric.metric_name).first()
return import_util.import_simple_obj(db.session, i_metric, lookup_obj)
class DruidDatasource(Model, BaseDatasource):
"""ORM object referencing Druid datasources (tables)"""
__tablename__ = 'datasources'
type = 'druid'
query_langtage = 'json'
cluster_class = DruidCluster
metric_class = DruidMetric
column_class = DruidColumn
baselink = 'druiddatasourcemodelview'
# Columns
datasource_name = Column(String(255), unique=True)
is_hidden = Column(Boolean, default=False)
fetch_values_from = Column(String(100))
cluster_name = Column(
String(250), ForeignKey('clusters.cluster_name'))
cluster = relationship(
'DruidCluster', backref='datasources', foreign_keys=[cluster_name])
user_id = Column(Integer, ForeignKey('ab_user.id'))
owner = relationship(
sm.user_model,
backref=backref('datasources', cascade='all, delete-orphan'),
foreign_keys=[user_id])
export_fields = (
'datasource_name', 'is_hidden', 'description', 'default_endpoint',
'cluster_name', 'offset', 'cache_timeout', 'params',
)
@property
def database(self):
return self.cluster
@property
def connection(self):
return str(self.database)
@property
def num_cols(self):
return [c.column_name for c in self.columns if c.is_num]
@property
def name(self):
return self.datasource_name
@property
def schema(self):
ds_name = self.datasource_name or ''
name_pieces = ds_name.split('.')
if len(name_pieces) > 1:
return name_pieces[0]
else:
return None
@property
def schema_perm(self):
"""Returns schema permission if present, cluster one otherwise."""
return utils.get_schema_perm(self.cluster, self.schema)
def get_perm(self):
return (
'[{obj.cluster_name}].[{obj.datasource_name}]'
'(id:{obj.id})').format(obj=self)
@property
def link(self):
name = escape(self.datasource_name)
return Markup('<a href="{self.url}">{name}</a>').format(**locals())
@property
def full_name(self):
return utils.get_datasource_full_name(
self.cluster_name, self.datasource_name)
@property
def time_column_grains(self):
return {
'time_columns': [
'all', '5 seconds', '30 seconds', '1 minute',
'5 minutes', '1 hour', '6 hour', '1 day', '7 days',
'week', 'week_starting_sunday', 'week_ending_saturday',
'month',
],
'time_grains': ['now'],
}
def __repr__(self):
return self.datasource_name
@renders('datasource_name')
def datasource_link(self):
url = '/superset/explore/{obj.type}/{obj.id}/'.format(obj=self)
name = escape(self.datasource_name)
return Markup('<a href="{url}">{name}</a>'.format(**locals()))
def get_metric_obj(self, metric_name):
return [
m.json_obj for m in self.metrics
if m.metric_name == metric_name
][0]
@classmethod
def import_obj(cls, i_datasource, import_time=None):
"""Imports the datasource from the object to the database.
Metrics and columns and datasource will be overridden if exists.
This function can be used to import/export dashboards between multiple
        superset instances. Audit metadata isn't copied over.
"""
def lookup_datasource(d):
return db.session.query(DruidDatasource).join(DruidCluster).filter(
DruidDatasource.datasource_name == d.datasource_name,
DruidCluster.cluster_name == d.cluster_name,
).first()
def lookup_cluster(d):
return db.session.query(DruidCluster).filter_by(
cluster_name=d.cluster_name).one()
return import_util.import_datasource(
db.session, i_datasource, lookup_cluster, lookup_datasource,
import_time)
@staticmethod
def version_higher(v1, v2):
"""is v1 higher than v2
>>> DruidDatasource.version_higher('0.8.2', '0.9.1')
False
>>> DruidDatasource.version_higher('0.8.2', '0.6.1')
True
>>> DruidDatasource.version_higher('0.8.2', '0.8.2')
False
>>> DruidDatasource.version_higher('0.8.2', '0.9.BETA')
False
>>> DruidDatasource.version_higher('0.8.2', '0.9')
False
"""
def int_or_0(v):
try:
v = int(v)
except (TypeError, ValueError):
v = 0
return v
v1nums = [int_or_0(n) for n in v1.split('.')]
v2nums = [int_or_0(n) for n in v2.split('.')]
v1nums = (v1nums + [0, 0, 0])[:3]
v2nums = (v2nums + [0, 0, 0])[:3]
return v1nums[0] > v2nums[0] or \
(v1nums[0] == v2nums[0] and v1nums[1] > v2nums[1]) or \
(v1nums[0] == v2nums[0] and v1nums[1] == v2nums[1] and v1nums[2] > v2nums[2])
def latest_metadata(self):
"""Returns segment metadata from the latest segment"""
logging.info('Syncing datasource [{}]'.format(self.datasource_name))
client = self.cluster.get_pydruid_client()
results = client.time_boundary(datasource=self.datasource_name)
if not results:
return
max_time = results[0]['result']['maxTime']
max_time = dparse(max_time)
        # Query segmentMetadata for 7 days back. However, due to a bug
        # (fixed in druid 0.8.2), the interval needs to end more than 1 day
        # ago so that realtime segments, which triggered that bug, are excluded.
# https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ
lbound = (max_time - timedelta(days=7)).isoformat()
if not self.version_higher(self.cluster.druid_version, '0.8.2'):
rbound = (max_time - timedelta(1)).isoformat()
else:
rbound = max_time.isoformat()
segment_metadata = None
try:
segment_metadata = client.segment_metadata(
datasource=self.datasource_name,
intervals=lbound + '/' + rbound,
merge=self.merge_flag,
analysisTypes=[])
except Exception as e:
logging.warning('Failed first attempt to get latest segment')
logging.exception(e)
if not segment_metadata:
# if no segments in the past 7 days, look at all segments
lbound = datetime(1901, 1, 1).isoformat()[:10]
if not self.version_higher(self.cluster.druid_version, '0.8.2'):
rbound = datetime.now().isoformat()
else:
rbound = datetime(2050, 1, 1).isoformat()[:10]
try:
segment_metadata = client.segment_metadata(
datasource=self.datasource_name,
intervals=lbound + '/' + rbound,
merge=self.merge_flag,
analysisTypes=[])
except Exception as e:
logging.warning('Failed 2nd attempt to get latest segment')
logging.exception(e)
if segment_metadata:
return segment_metadata[-1]['columns']
def generate_metrics(self):
self.generate_metrics_for(self.columns)
def generate_metrics_for(self, columns):
metrics = {}
for col in columns:
metrics.update(col.get_metrics())
dbmetrics = (
db.session.query(DruidMetric)
.filter(DruidCluster.cluster_name == self.cluster_name)
.filter(DruidMetric.datasource_name == self.datasource_name)
.filter(or_(DruidMetric.metric_name == m for m in metrics))
)
dbmetrics = {metric.metric_name: metric for metric in dbmetrics}
for metric in metrics.values():
metric.datasource_name = self.datasource_name
if not dbmetrics.get(metric.metric_name, None):
with db.session.no_autoflush:
db.session.add(metric)
@classmethod
def sync_to_db_from_config(
cls,
druid_config,
user,
cluster,
refresh=True):
"""Merges the ds config from druid_config into one stored in the db."""
session = db.session
datasource = (
session.query(cls)
.filter_by(datasource_name=druid_config['name'])
.first()
)
# Create a new datasource.
if not datasource:
datasource = cls(
datasource_name=druid_config['name'],
cluster=cluster,
owner=user,
changed_by_fk=user.id,
created_by_fk=user.id,
)
session.add(datasource)
elif not refresh:
return
dimensions = druid_config['dimensions']
col_objs = (
session.query(DruidColumn)
.filter(DruidColumn.datasource_name == druid_config['name'])
.filter(or_(DruidColumn.column_name == dim for dim in dimensions))
)
col_objs = {col.column_name: col for col in col_objs}
for dim in dimensions:
col_obj = col_objs.get(dim, None)
if not col_obj:
col_obj = DruidColumn(
datasource_name=druid_config['name'],
column_name=dim,
groupby=True,
filterable=True,
# TODO: fetch type from Hive.
type='STRING',
datasource=datasource,
)
session.add(col_obj)
# Import Druid metrics
metric_objs = (
session.query(DruidMetric)
.filter(DruidMetric.datasource_name == druid_config['name'])
.filter(or_(DruidMetric.metric_name == spec['name']
for spec in druid_config['metrics_spec']))
)
metric_objs = {metric.metric_name: metric for metric in metric_objs}
for metric_spec in druid_config['metrics_spec']:
metric_name = metric_spec['name']
metric_type = metric_spec['type']
metric_json = json.dumps(metric_spec)
if metric_type == 'count':
metric_type = 'longSum'
metric_json = json.dumps({
'type': 'longSum',
'name': metric_name,
'fieldName': metric_name,
})
metric_obj = metric_objs.get(metric_name, None)
if not metric_obj:
metric_obj = DruidMetric(
metric_name=metric_name,
metric_type=metric_type,
verbose_name='%s(%s)' % (metric_type, metric_name),
datasource=datasource,
json=metric_json,
description=(
'Imported from the airolap config dir for %s' %
druid_config['name']),
)
session.add(metric_obj)
session.commit()
@staticmethod
def time_offset(granularity):
if granularity == 'week_ending_saturday':
return 6 * 24 * 3600 * 1000 # 6 days
return 0
# uses https://en.wikipedia.org/wiki/ISO_8601
# http://druid.io/docs/0.8.0/querying/granularities.html
# TODO: pass origin from the UI
@staticmethod
def granularity(period_name, timezone=None, origin=None):
if not period_name or period_name == 'all':
return 'all'
iso_8601_dict = {
'5 seconds': 'PT5S',
'30 seconds': 'PT30S',
'1 minute': 'PT1M',
'5 minutes': 'PT5M',
'1 hour': 'PT1H',
'6 hour': 'PT6H',
'one day': 'P1D',
'1 day': 'P1D',
'7 days': 'P7D',
'week': 'P1W',
'week_starting_sunday': 'P1W',
'week_ending_saturday': 'P1W',
'month': 'P1M',
}
granularity = {'type': 'period'}
if timezone:
granularity['timeZone'] = timezone
if origin:
dttm = utils.parse_human_datetime(origin)
granularity['origin'] = dttm.isoformat()
if period_name in iso_8601_dict:
granularity['period'] = iso_8601_dict[period_name]
if period_name in ('week_ending_saturday', 'week_starting_sunday'):
# use Sunday as start of the week
granularity['origin'] = '2016-01-03T00:00:00'
elif not isinstance(period_name, string_types):
granularity['type'] = 'duration'
granularity['duration'] = period_name
elif period_name.startswith('P'):
# identify if the string is the iso_8601 period
granularity['period'] = period_name
else:
granularity['type'] = 'duration'
granularity['duration'] = utils.parse_human_timedelta(
period_name).total_seconds() * 1000
return granularity
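    # Illustrative sketch (added comment, not part of the original source):
    #   DruidDatasource.granularity('1 hour', timezone='UTC')
    # returns {'type': 'period', 'period': 'PT1H', 'timeZone': 'UTC'}, while a
    # free-form period such as '90 minutes' falls back to a 'duration'
    # granularity expressed in milliseconds via parse_human_timedelta.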
@staticmethod
def _metrics_and_post_aggs(metrics, metrics_dict):
all_metrics = []
post_aggs = {}
def recursive_get_fields(_conf):
_type = _conf.get('type')
_field = _conf.get('field')
_fields = _conf.get('fields')
field_names = []
if _type in ['fieldAccess', 'hyperUniqueCardinality',
'quantile', 'quantiles']:
field_names.append(_conf.get('fieldName', ''))
if _field:
field_names += recursive_get_fields(_field)
if _fields:
for _f in _fields:
field_names += recursive_get_fields(_f)
return list(set(field_names))
for metric_name in metrics:
metric = metrics_dict[metric_name]
if metric.metric_type != 'postagg':
all_metrics.append(metric_name)
else:
mconf = metric.json_obj
all_metrics += recursive_get_fields(mconf)
all_metrics += mconf.get('fieldNames', [])
if mconf.get('type') == 'javascript':
post_aggs[metric_name] = JavascriptPostAggregator(
name=mconf.get('name', ''),
field_names=mconf.get('fieldNames', []),
function=mconf.get('function', ''))
elif mconf.get('type') == 'quantile':
post_aggs[metric_name] = Quantile(
mconf.get('name', ''),
mconf.get('probability', ''),
)
elif mconf.get('type') == 'quantiles':
post_aggs[metric_name] = Quantiles(
mconf.get('name', ''),
mconf.get('probabilities', ''),
)
elif mconf.get('type') == 'fieldAccess':
post_aggs[metric_name] = Field(mconf.get('name'))
elif mconf.get('type') == 'constant':
post_aggs[metric_name] = Const(
mconf.get('value'),
output_name=mconf.get('name', ''),
)
elif mconf.get('type') == 'hyperUniqueCardinality':
post_aggs[metric_name] = HyperUniqueCardinality(
mconf.get('name'),
)
elif mconf.get('type') == 'arithmetic':
post_aggs[metric_name] = Postaggregator(
mconf.get('fn', '/'),
mconf.get('fields', []),
mconf.get('name', ''))
else:
post_aggs[metric_name] = CustomPostAggregator(
mconf.get('name', ''),
mconf)
return all_metrics, post_aggs
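    # Illustrative sketch (added comment, not part of the original source): for
    # a hypothetical metric saved with metric_type 'postagg' and JSON
    #   {"type": "arithmetic", "fn": "/", "name": "avg_size",
    #    "fields": [{"type": "fieldAccess", "fieldName": "size_sum"},
    #               {"type": "fieldAccess", "fieldName": "count"}]}
    # recursive_get_fields() collects ['size_sum', 'count'] so those plain
    # aggregations are queried, and the metric itself becomes a pydruid
    # Postaggregator entry in post_aggs.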
def values_for_column(self,
column_name,
limit=10000):
"""Retrieve some values for the given column"""
# TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid
if self.fetch_values_from:
from_dttm = utils.parse_human_datetime(self.fetch_values_from)
else:
from_dttm = datetime(1970, 1, 1)
qry = dict(
datasource=self.datasource_name,
granularity='all',
intervals=from_dttm.isoformat() + '/' + datetime.now().isoformat(),
aggregations=dict(count=count('count')),
dimension=column_name,
metric='count',
threshold=limit,
)
client = self.cluster.get_pydruid_client()
client.topn(**qry)
df = client.export_pandas()
return [row[column_name] for row in df.to_records(index=False)]
def get_query_str(self, query_obj, phase=1, client=None):
return self.run_query(client=client, phase=phase, **query_obj)
def _add_filter_from_pre_query_data(self, df, dimensions, dim_filter):
ret = dim_filter
if df is not None and not df.empty:
new_filters = []
for unused, row in df.iterrows():
fields = []
for dim in dimensions:
f = Dimension(dim) == row[dim]
fields.append(f)
if len(fields) > 1:
term = Filter(type='and', fields=fields)
new_filters.append(term)
elif fields:
new_filters.append(fields[0])
if new_filters:
ff = Filter(type='or', fields=new_filters)
if not dim_filter:
ret = ff
else:
ret = Filter(type='and', fields=[ff, dim_filter])
return ret
def run_query( # noqa / druid
self,
groupby, metrics,
granularity,
from_dttm, to_dttm,
filter=None, # noqa
is_timeseries=True,
timeseries_limit=None,
timeseries_limit_metric=None,
row_limit=None,
inner_from_dttm=None, inner_to_dttm=None,
orderby=None,
extras=None, # noqa
select=None, # noqa
columns=None, phase=2, client=None, form_data=None,
order_desc=True):
"""Runs a query against Druid and returns a dataframe.
"""
# TODO refactor into using a TBD Query object
client = client or self.cluster.get_pydruid_client()
if not is_timeseries:
granularity = 'all'
inner_from_dttm = inner_from_dttm or from_dttm
inner_to_dttm = inner_to_dttm or to_dttm
# add tzinfo to native datetime with config
from_dttm = from_dttm.replace(tzinfo=DRUID_TZ)
to_dttm = to_dttm.replace(tzinfo=DRUID_TZ)
timezone = from_dttm.tzname()
query_str = ''
metrics_dict = {m.metric_name: m for m in self.metrics}
columns_dict = {c.column_name: c for c in self.columns}
all_metrics, post_aggs = self._metrics_and_post_aggs(
metrics,
metrics_dict)
aggregations = OrderedDict()
for m in self.metrics:
if m.metric_name in all_metrics:
aggregations[m.metric_name] = m.json_obj
rejected_metrics = [
m.metric_name for m in self.metrics
if m.is_restricted and
m.metric_name in aggregations.keys() and
not sm.has_access('metric_access', m.perm)
]
if rejected_metrics:
raise MetricPermException(
'Access to the metrics denied: ' + ', '.join(rejected_metrics),
)
# the dimensions list with dimensionSpecs expanded
dimensions = []
groupby = [gb for gb in groupby if gb in columns_dict]
for column_name in groupby:
col = columns_dict.get(column_name)
dim_spec = col.dimension_spec
if dim_spec:
dimensions.append(dim_spec)
else:
dimensions.append(column_name)
qry = dict(
datasource=self.datasource_name,
dimensions=dimensions,
aggregations=aggregations,
granularity=DruidDatasource.granularity(
granularity,
timezone=timezone,
origin=extras.get('druid_time_origin'),
),
post_aggregations=post_aggs,
intervals=from_dttm.isoformat() + '/' + to_dttm.isoformat(),
)
filters = DruidDatasource.get_filters(filter, self.num_cols)
if filters:
qry['filter'] = filters
having_filters = self.get_having_filters(extras.get('having_druid'))
if having_filters:
qry['having'] = having_filters
order_direction = 'descending' if order_desc else 'ascending'
if len(groupby) == 0 and not having_filters:
del qry['dimensions']
client.timeseries(**qry)
if (
not having_filters and
len(groupby) == 1 and
order_desc and
not isinstance(list(qry.get('dimensions'))[0], dict)
):
dim = list(qry.get('dimensions'))[0]
if timeseries_limit_metric:
order_by = timeseries_limit_metric
else:
order_by = list(qry['aggregations'].keys())[0]
            # Limit the number of timeseries, doing a two-phase query
pre_qry = deepcopy(qry)
pre_qry['granularity'] = 'all'
pre_qry['threshold'] = min(row_limit,
timeseries_limit or row_limit)
pre_qry['metric'] = order_by
pre_qry['dimension'] = dim
del pre_qry['dimensions']
client.topn(**pre_qry)
query_str += '// Two phase query\n// Phase 1\n'
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
query_str += '\n'
if phase == 1:
return query_str
query_str += (
"// Phase 2 (built based on phase one's results)\n")
df = client.export_pandas()
qry['filter'] = self._add_filter_from_pre_query_data(
df,
qry['dimensions'], filters)
qry['threshold'] = timeseries_limit or 1000
if row_limit and granularity == 'all':
qry['threshold'] = row_limit
qry['dimension'] = list(qry.get('dimensions'))[0]
qry['dimension'] = dim
del qry['dimensions']
qry['metric'] = list(qry['aggregations'].keys())[0]
client.topn(**qry)
elif len(groupby) > 1 or having_filters or not order_desc:
# If grouping on multiple fields or using a having filter
# we have to force a groupby query
if timeseries_limit and is_timeseries:
order_by = metrics[0] if metrics else self.metrics[0]
if timeseries_limit_metric:
order_by = timeseries_limit_metric
                # Limit the number of timeseries, doing a two-phase query
pre_qry = deepcopy(qry)
pre_qry['granularity'] = 'all'
pre_qry['limit_spec'] = {
'type': 'default',
'limit': min(timeseries_limit, row_limit),
'intervals': (
inner_from_dttm.isoformat() + '/' +
inner_to_dttm.isoformat()),
'columns': [{
'dimension': order_by,
'direction': order_direction,
}],
}
client.groupby(**pre_qry)
query_str += '// Two phase query\n// Phase 1\n'
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
query_str += '\n'
if phase == 1:
return query_str
query_str += (
"// Phase 2 (built based on phase one's results)\n")
df = client.export_pandas()
qry['filter'] = self._add_filter_from_pre_query_data(
df,
qry['dimensions'],
filters,
)
qry['limit_spec'] = None
if row_limit:
qry['limit_spec'] = {
'type': 'default',
'limit': row_limit,
'columns': [{
'dimension': (
metrics[0] if metrics else self.metrics[0]),
'direction': order_direction,
}],
}
client.groupby(**qry)
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
return query_str
def query(self, query_obj):
qry_start_dttm = datetime.now()
client = self.cluster.get_pydruid_client()
query_str = self.get_query_str(
client=client, query_obj=query_obj, phase=2)
df = client.export_pandas()
if df is None or df.size == 0:
raise Exception(_('No data was returned.'))
df.columns = [
DTTM_ALIAS if c == 'timestamp' else c for c in df.columns]
is_timeseries = query_obj['is_timeseries'] \
if 'is_timeseries' in query_obj else True
if (
not is_timeseries and
DTTM_ALIAS in df.columns):
del df[DTTM_ALIAS]
# Reordering columns
cols = []
if DTTM_ALIAS in df.columns:
cols += [DTTM_ALIAS]
cols += [col for col in query_obj['groupby'] if col in df.columns]
cols += [col for col in query_obj['metrics'] if col in df.columns]
df = df[cols]
time_offset = DruidDatasource.time_offset(query_obj['granularity'])
def increment_timestamp(ts):
dt = utils.parse_human_datetime(ts).replace(
tzinfo=DRUID_TZ)
return dt + timedelta(milliseconds=time_offset)
if DTTM_ALIAS in df.columns and time_offset:
df[DTTM_ALIAS] = df[DTTM_ALIAS].apply(increment_timestamp)
return QueryResult(
df=df,
query=query_str,
duration=datetime.now() - qry_start_dttm)
@staticmethod
def get_filters(raw_filters, num_cols): # noqa
filters = None
for flt in raw_filters:
if not all(f in flt for f in ['col', 'op', 'val']):
continue
col = flt['col']
op = flt['op']
eq = flt['val']
cond = None
if op in ('in', 'not in'):
eq = [
types.replace('"', '').strip()
if isinstance(types, string_types)
else types
for types in eq]
elif not isinstance(flt['val'], string_types):
eq = eq[0] if eq and len(eq) > 0 else ''
is_numeric_col = col in num_cols
if is_numeric_col:
if op in ('in', 'not in'):
eq = [utils.string_to_num(v) for v in eq]
else:
eq = utils.string_to_num(eq)
if op == '==':
cond = Dimension(col) == eq
elif op == '!=':
cond = Dimension(col) != eq
elif op in ('in', 'not in'):
fields = []
# ignore the filter if it has no value
if not len(eq):
continue
elif len(eq) == 1:
cond = Dimension(col) == eq[0]
else:
for s in eq:
fields.append(Dimension(col) == s)
cond = Filter(type='or', fields=fields)
if op == 'not in':
cond = ~cond
elif op == 'regex':
cond = Filter(type='regex', pattern=eq, dimension=col)
elif op == '>=':
cond = Bound(col, eq, None, alphaNumeric=is_numeric_col)
elif op == '<=':
cond = Bound(col, None, eq, alphaNumeric=is_numeric_col)
elif op == '>':
cond = Bound(
col, eq, None,
lowerStrict=True, alphaNumeric=is_numeric_col,
)
elif op == '<':
cond = Bound(
col, None, eq,
upperStrict=True, alphaNumeric=is_numeric_col,
)
if filters:
filters = Filter(type='and', fields=[
cond,
filters,
])
else:
filters = cond
return filters
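    # Illustrative sketch (added comment, not part of the original source): a
    # raw filter such as {'col': 'country', 'op': 'in', 'val': ['US', 'CA']}
    # (hypothetical column and values) becomes
    #   Filter(type='or', fields=[Dimension('country') == 'US',
    #                             Dimension('country') == 'CA'])
    # and successive raw filters are AND-ed together into a single Filter.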
def _get_having_obj(self, col, op, eq):
cond = None
if op == '==':
if col in self.column_names:
cond = DimSelector(dimension=col, value=eq)
else:
cond = Aggregation(col) == eq
elif op == '>':
cond = Aggregation(col) > eq
elif op == '<':
cond = Aggregation(col) < eq
return cond
def get_having_filters(self, raw_filters):
filters = None
reversed_op_map = {
'!=': '==',
'>=': '<',
'<=': '>',
}
for flt in raw_filters:
if not all(f in flt for f in ['col', 'op', 'val']):
continue
col = flt['col']
op = flt['op']
eq = flt['val']
cond = None
if op in ['==', '>', '<']:
cond = self._get_having_obj(col, op, eq)
elif op in reversed_op_map:
cond = ~self._get_having_obj(col, reversed_op_map[op], eq)
if filters:
filters = filters & cond
else:
filters = cond
return filters
@classmethod
def query_datasources_by_name(
cls, session, database, datasource_name, schema=None):
return (
session.query(cls)
.filter_by(cluster_name=database.id)
.filter_by(datasource_name=datasource_name)
.all()
)
sa.event.listen(DruidDatasource, 'after_insert', set_perm)
sa.event.listen(DruidDatasource, 'after_update', set_perm)
| apache-2.0 | -5,881,308,706,200,281,000 | 35.677994 | 89 | 0.527088 | false | 4.143118 | false | false | false |
shsdev/khclass | khclarifai/khclarifai_predict.py | 1 | 1302 | #!/usr/bin/env python
# coding=UTF-8
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from math import floor
from clarifai.rest import ClarifaiApp
from config.configuration import data_path, test_set_id, clarifai_api_key, clarifai_model_name
def floored_percentage(val, digits):
"""Format float value as percentage string"""
val *= 10 ** (digits + 2)
return '{1:.{0}f}%'.format(digits, floor(val) / 10 ** digits)
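# Illustrative example (added comment, not part of the original source):
# floored_percentage(0.12345, 2) returns '12.34%' -- the value is floored,
# not rounded, before formatting.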
def get_prediction_confidence(model, image_path):
"""Get the first value's float prediction value"""
print "Processing prediction for image: %s" % image_path
full_image_path = "%s/%s" % (data_path, image_path)
prediction_confidence = 0.0
result = model.predict_by_filename(full_image_path)
for o in result['outputs']:
concept_results = o['data']['concepts']
for concept_result in concept_results:
print concept_result['value']
prediction_confidence = float(concept_result['value'])
break
return prediction_confidence
if __name__ == '__main__':
app = ClarifaiApp(api_key=clarifai_api_key)
mdl = app.models.get(clarifai_model_name)
print floored_percentage(get_prediction_confidence(mdl, "images/test%s/%s" % (test_set_id, "EMK_1303.jpg")), 2)
| gpl-3.0 | -7,671,868,139,313,991,000 | 34.189189 | 115 | 0.667435 | false | 3.304569 | false | false | false |
h2g2bob/ynmp-wikipedia-sync | chgparty_dot.py | 1 | 5461 | import csv
from collections import defaultdict
from collections import namedtuple
class Pty(object):
def __init__(self, ynmp, name, rank=3, color="white"):
self.ynmp = ynmp
self.name = name.replace('"', '').replace("'", "")
self.rank = rank
self.color = color
self.code = "".join(x for x in self.ynmp if x.isalpha())
def __hash__(self):
return hash(self.ynmp)
def __cmp__(self, other):
return cmp(self.ynmp, other)
parties = dict((x.ynmp, x) for x in (
Pty("Conservative Party", "Conservative", 0, "dodgerblue"),
Pty("Labour Party", "Labour", 0, "firebrick1"),
Pty("Liberal Democrats", "Lib Dem", 0, "orange"),
Pty("UK Independence Party (UKIP)", "UKIP", 1, "purple"),
Pty("Green Party", "Green", 1, "green"),
Pty("British National Party", "BNP"),
Pty("Christian Party \"Proclaiming Christ's Lordship\"", "Christian"),
Pty("English Democrats", "Eng Dem"),
Pty("Ulster Unionist Party", "UUP"),
Pty("Trade Unionist and Socialist Coalition", "TUSC"),
Pty("National Health Action Party", "NHA"),
))
party_others = Pty("Others", "Others")
def get_party(ynmp_name, args):
try:
party = parties[ynmp_name]
except KeyError:
if ynmp_name == "Independent":
party = Pty("Independent", "Independent", rank=0 if args.independent else 100)
else:
party = Pty(ynmp_name, ynmp_name)
if party.rank > 5 - args.hide_small:
party = party_others
return party
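# Illustrative note (added comment, not part of the original source): an
# unknown party name gets the default rank 3, so it survives "-s" given up to
# two times (3 > 5 - 2 is false) and folds into the shared "Others" node at
# "-sss". "Independent" is ranked 100 -- always folded -- unless -i is passed.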
def format_name(name):
return name
def name_grouping_individual(l):
return [[x] for x in l]
def name_grouping_grouped(l):
return [l]
def print_digraph(by_parties, name_grouping, args):
print "digraph {"
for party in set(n for (n, _) in by_parties.keys()) | set(n for (_, n) in by_parties.keys()):
print "%s [label=\"%s\",style=filled,fillcolor=%s];" % (party.code, party.name, party.color if args.color else "white",)
for ((old, new), full_namelist) in by_parties.items():
for namelist in name_grouping(full_namelist):
print "%s -> %s [label=\"%s\", penwidth=%d, weight=%d, fontsize=10];" % (
old.code,
new.code,
"\\n".join(format_name(name) for name in namelist) if args.names else "",
len(namelist),
len(namelist))
print "}"
def main(args):
by_parties = defaultdict(list)
for _, name, old_name, new_name in csv.reader(open("chgparty.csv")):
old = get_party(old_name, args)
new = get_party(new_name, args)
by_parties[old, new].append(name)
if args.ignore_uup:
by_parties.pop(("Conservative and Unionist Party", "Ulster Unionist Party"), None) # pop with default avoids KeyError
if args.trim_parties:
by_parties = trim_parties(args, by_parties)
if not args.no_others:
by_parties = dict(((old, new), namelist) for ((old, new), namelist) in by_parties.items() if old != "Others" and new != "Others")
if not args.others_to_others:
by_parties = dict(((old, new), namelist) for ((old, new), namelist) in by_parties.items() if old != "Others" or new != "Others")
if args.trim:
by_parties = dict(((old, new), namelist) for ((old, new), namelist) in by_parties.items() if len(namelist) > args.trim or max((old.rank, new.rank)) < args.dont_trim_large)
print_digraph(by_parties, name_grouping_individual if args.single_line else name_grouping_grouped, args)
def trim_parties(args, by_parties):
counts = defaultdict(int)
for (old, new), namelist in by_parties.items():
counts[old] += len(namelist)
counts[new] += len(namelist)
to_trim = set(k for (k, v) in counts.items() if v <= args.trim_parties)
rtn = {}
for (old, new), namelist in by_parties.items():
if old in to_trim:
old = party_others
if new in to_trim:
new = party_others
rtn.setdefault((old, new), []).extend(namelist)
return rtn
if __name__=='__main__':
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--trim", action="count", default=0, help="Hide single defections (multiple times to hide less than N defections)")
parser.add_argument("-T", "--dont-trim-large", action="count", default=0, help="Do not hide single defections to/from large political parties")
parser.add_argument("-s", "--hide-small", action="count", default=0, help="Hide small parties (multiple times to hide more parties)")
parser.add_argument("-x", "--trim-parties", action="count", default=0, help="Trim parties with few defections")
parser.add_argument("-o", "--no-others", action="store_false", default=True, help="Hide the combined \"others\" for small parties")
parser.add_argument("-2", "--others-to-others", action="store_true", default=False, help="Show defections from \"others\" to itself")
parser.add_argument("-i", "--independent", action="store_true", default=False, help="Show independent and others as different")
parser.add_argument("-1", "--single-line", action="store_true", default=False, help="Show one line per candidate")
parser.add_argument("-c", "--no-color", action="store_false", dest="color", default=True, help="No color")
parser.add_argument("-n", "--no-names", action="store_false", dest="names", default=True, help="No names")
parser.add_argument("--no-ignore-uup", action="store_false", dest="ignore_uup", default=True, help="The UUP fielded a bunch of candidates jointly with the Conservative Party, using the name \"Conservative and Unionist Party\". The candidates were really UUP people, so this transition is boring.")
args = parser.parse_args()
if args.dont_trim_large and not args.trim:
raise ValueError("You can't use -T without -t")
main(args)
| agpl-3.0 | -6,079,931,843,688,282,000 | 38.572464 | 298 | 0.680461 | false | 2.942349 | false | false | false |
underyx/TheMajorNews | main.py | 1 | 2711 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import config
import requests
from requests_oauthlib import OAuth1
from base64 import b64encode
def get_access_token():
token = config.twitter_app_key + ':' + config.twitter_app_secret
h = {'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
'Authorization': b'Basic ' + b64encode(bytes(token, 'utf8'))}
print()
r = requests.post('https://api.twitter.com/oauth2/token',
data=b'grant_type=client_credentials', headers=h)
assert r.json()['token_type'] == 'bearer'
return r.json()['access_token']
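# Illustrative note (added comment, not part of the original source): this is
# Twitter's application-only OAuth2 flow -- app key and secret are joined with
# ':', base64-encoded into a Basic Authorization header, and exchanged for a
# bearer token that get_latest_tweet() then sends as
# 'Authorization: Bearer <token>'.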
def get_latest_tweet(token):
parameters = {'screen_name': 'TwoHeadlines',
'count': 1,
'trim_user': True}
headers = {'Authorization': 'Bearer ' + token}
r = requests.get('https://api.twitter.com/1.1/statuses/user_timeline.json',
params=parameters, headers=headers)
return r.json(encoding='utf8')[0]['text']
def do_translations(tweet, i=0):
i += 1
if i > config.run_limit:
return tweet
ko_parameters = {'q': tweet,
'format': 'text',
'target': 'ko',
'source': 'en',
'key': config.google_key}
ko_r = requests.get('https://www.googleapis.com/language/translate/v2',
params=ko_parameters)
ko_result = ko_r.json()['data']['translations'][0]['translatedText']
en_parameters = {'q': ko_result,
'format': 'text',
'target': 'en',
'source': 'ko',
'key': config.google_key}
en_r = requests.get('https://www.googleapis.com/language/translate/v2',
params=en_parameters)
en_result = en_r.json()['data']['translations'][0]['translatedText']
print('Translation #{} is: {}'.format(i, en_result))
return do_translations(en_result, i) if tweet != en_result else en_result
def post_tweet(tweet):
if len(tweet) > 140:
tweet = tweet[:137] + "..."
auth = OAuth1(config.twitter_app_key, config.twitter_app_secret,
config.twitter_user_key, config.twitter_user_secret)
r = requests.post('https://api.twitter.com/1.1/statuses/update.json',
auth=auth, data={'status': tweet})
return r.json()
def main():
bearer_token = get_access_token()
latest_tweet = get_latest_tweet(bearer_token)
print('Latest Original is: ' + latest_tweet)
translation = do_translations(latest_tweet)
print('Translation is: ' + translation)
post_tweet(translation)
if __name__ == '__main__':
main()
| mit | 2,091,146,900,491,236,900 | 28.467391 | 79 | 0.569532 | false | 3.683424 | true | false | false |
hunse/vrep-python | dvs-play.py | 1 | 1515 | """
Play DVS events in real time
TODO: deal with looping event times for recordings > 65 s
"""
import numpy as np
import matplotlib.pyplot as plt
import dvs
def close(a, b, atol=1e-8, rtol=1e-5):
return np.abs(a - b) < atol + rtol * b
def imshow(image, ax=None):
ax = plt.gca() if ax is None else ax
ax.imshow(image, vmin=-1, vmax=1, cmap='gray', interpolation=None)
def add_to_image(image, events):
for x, y, s, _ in events:
image[y, x] += 1 if s else -1
def as_image(events):
image = np.zeros((128, 128), dtype=float)
add_to_image(image, events)
return image
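# Illustrative note (added comment, not part of the original source): each DVS
# event is an (x, y, polarity, t) record; as_image() accumulates +1 for
# positive-polarity and -1 for negative-polarity events into a 128x128 float
# frame, which imshow() then displays on a fixed [-1, 1] gray scale.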
# filename = 'dvs.npz'
filename = 'dvs-ball-10ms.npz'
events = dvs.load(filename, dt_round=True)
udiffs = np.unique(np.diff(np.unique(events['t'])))
assert np.allclose(udiffs, 0.01)
plt.figure(1)
plt.clf()
times = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
for i in range(6):
plt.subplot(2, 3, i+1)
imshow(as_image(events[close(events['t'], times[i])]))
plt.title("t = %0.3f" % times[i])
# plt.figure(1)
# plt.clf()
# image = np.zeros((128, 128), dtype=float)
# plt_image = plt.imshow(image, vmin=-1, vmax=1, cmap='gray', interpolation=None)
# plt.gca().invert_yaxis()
# while t0 < t_max:
# time.sleep(0.001)
# t1 = time.time() - t_world
# new_events = events[(ts > t0) & (ts < t1)]
# dt = t1 - t0
# image *= np.exp(-dt / 0.01)
# for x, y, s, _ in new_events:
# image[y, x] += 1 if s else -1
# plt_image.set_data(image)
# plt.draw()
# t0 = t1
plt.show()
| gpl-2.0 | 5,992,263,968,722,449,000 | 21.279412 | 81 | 0.59538 | false | 2.467427 | false | false | false |
jasonmaier/CircularEconomyBlog | db_repository/versions/013_migration.py | 1 | 1155 | from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
tasks = Table('tasks', pre_meta,
Column('id', INTEGER, primary_key=True, nullable=False),
Column('priority', INTEGER, nullable=False),
Column('user_id', INTEGER),
Column('task', VARCHAR(length=140)),
)
tasks = Table('tasks', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('description', String(length=140)),
Column('priority', Integer),
Column('user_id', Integer),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['tasks'].columns['task'].drop()
post_meta.tables['tasks'].columns['description'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['tasks'].columns['task'].create()
post_meta.tables['tasks'].columns['description'].drop()
| bsd-3-clause | 1,869,276,016,205,127,000 | 30.216216 | 68 | 0.688312 | false | 3.799342 | false | false | false |
dwaiter/django-filebrowser-old | filebrowser/base.py | 1 | 5606 | # coding: utf-8
# imports
import os, re, datetime
from time import gmtime, strftime
# django imports
from django.conf import settings
# filebrowser imports
from filebrowser.settings import *
from filebrowser.functions import get_file_type, url_join, is_selectable, get_version_path
# PIL import
if STRICT_PIL:
from PIL import Image
else:
try:
from PIL import Image
except ImportError:
import Image
class FileObject(object):
"""
The FileObject represents a File on the Server.
PATH has to be relative to MEDIA_ROOT.
"""
def __init__(self, path):
self.path = path
self.url_rel = path.replace("\\","/")
self.head = os.path.split(path)[0]
self.filename = os.path.split(path)[1]
self.filename_lower = self.filename.lower() # important for sorting
self.filetype = get_file_type(self.filename)
def _filesize(self):
"""
Filesize.
"""
if os.path.isfile(os.path.join(MEDIA_ROOT, self.path)) or os.path.isdir(os.path.join(MEDIA_ROOT, self.path)):
return os.path.getsize(os.path.join(MEDIA_ROOT, self.path))
return ""
filesize = property(_filesize)
def _date(self):
"""
Date.
"""
if os.path.isfile(os.path.join(MEDIA_ROOT, self.path)) or os.path.isdir(os.path.join(MEDIA_ROOT, self.path)):
return os.path.getmtime(os.path.join(MEDIA_ROOT, self.path))
return ""
date = property(_date)
def _datetime(self):
"""
Datetime Object.
"""
return datetime.datetime.fromtimestamp(self.date)
datetime = property(_datetime)
def _extension(self):
"""
Extension.
"""
return u"%s" % os.path.splitext(self.filename)[1]
extension = property(_extension)
def _filetype_checked(self):
if self.filetype == "Folder" and os.path.isdir(self.path_full):
return self.filetype
elif self.filetype != "Folder" and os.path.isfile(self.path_full):
return self.filetype
else:
return ""
filetype_checked = property(_filetype_checked)
def _path_full(self):
"""
Full server PATH including MEDIA_ROOT.
"""
return u"%s" % os.path.join(MEDIA_ROOT, self.path)
path_full = property(_path_full)
def _path_relative(self):
return self.path
path_relative = property(_path_relative)
def _path_relative_directory(self):
"""
Path relative to initial directory.
"""
directory_re = re.compile(r'^(%s)' % (DIRECTORY))
value = directory_re.sub('', self.path)
return u"%s" % value
path_relative_directory = property(_path_relative_directory)
def _folder(self):
directory_re = re.compile(r'^(%s)' % (DIRECTORY.rstrip('/')))
return u"%s/" % directory_re.sub('', self.head)
folder = property(_folder)
def _url_relative(self):
return self.url_rel
url_relative = property(_url_relative)
def _url_full(self):
"""
Full URL including MEDIA_URL.
"""
return u"%s" % url_join(MEDIA_URL, self.url_rel)
url_full = property(_url_full)
def _url_save(self):
"""
URL used for the filebrowsefield.
"""
if SAVE_FULL_URL:
return self.url_full
else:
return self.url_rel
url_save = property(_url_save)
def _url_thumbnail(self):
"""
Thumbnail URL.
"""
if self.filetype == "Image":
return u"%s" % url_join(MEDIA_URL, get_version_path(self.path, 'fb_thumb'))
else:
return ""
url_thumbnail = property(_url_thumbnail)
def url_admin(self):
if self.filetype_checked == "Folder":
directory_re = re.compile(r'^(%s)' % (DIRECTORY))
value = directory_re.sub('', self.path)
return u"%s" % value
else:
return u"%s" % url_join(MEDIA_URL, self.path)
def _dimensions(self):
"""
Image Dimensions.
"""
if self.filetype == 'Image':
try:
im = Image.open(os.path.join(MEDIA_ROOT, self.path))
return im.size
except:
pass
else:
return False
dimensions = property(_dimensions)
def _width(self):
"""
Image Width.
"""
return self.dimensions[0]
width = property(_width)
def _height(self):
"""
Image Height.
"""
return self.dimensions[1]
height = property(_height)
def _orientation(self):
"""
Image Orientation.
"""
if self.dimensions:
if self.dimensions[0] >= self.dimensions[1]:
return "Landscape"
else:
return "Portrait"
else:
return None
orientation = property(_orientation)
def _is_empty(self):
"""
True if Folder is empty, False if not.
"""
if os.path.isdir(self.path_full):
if not os.listdir(self.path_full):
return True
else:
return False
else:
return None
is_empty = property(_is_empty)
def __repr__(self):
return u"%s" % self.url_save
def __str__(self):
return u"%s" % self.url_save
def __unicode__(self):
return u"%s" % self.url_save
| bsd-3-clause | 1,552,659,356,722,771,000 | 26.082126 | 117 | 0.539422 | false | 3.987198 | false | false | false |
grengojbo/st2 | st2client/st2client/models/core.py | 1 | 11692 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import logging
from functools import wraps
import six
from six.moves import urllib
from st2client.utils import httpclient
LOG = logging.getLogger(__name__)
def add_auth_token_to_kwargs_from_env(func):
@wraps(func)
def decorate(*args, **kwargs):
if not kwargs.get('token') and os.environ.get('ST2_AUTH_TOKEN', None):
kwargs['token'] = os.environ.get('ST2_AUTH_TOKEN')
return func(*args, **kwargs)
return decorate
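# Illustrative sketch (added comment, not part of the original source): a
# manager call wrapped by this decorator picks the token up from the
# environment, e.g. for some ResourceManager instance `manager`:
#
#   os.environ['ST2_AUTH_TOKEN'] = '<token>'   # hypothetical token value
#   manager.get_all()   # behaves as if token='<token>' had been passed
#
# An explicit token= keyword from the caller still takes precedence.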
class Resource(object):
# An alias to use for the resource if different than the class name.
_alias = None
# Display name of the resource. This may be different than its resource
# name specifically when the resource name is composed of multiple words.
_display_name = None
# URL path for the resource.
_url_path = None
# Plural form of the resource name. This will be used to build the
# latter part of the REST URL.
_plural = None
# Plural form of the resource display name.
_plural_display_name = None
# A list of class attributes which will be included in __repr__ return value
_repr_attributes = []
def __init__(self, *args, **kwargs):
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
def to_dict(self, exclude_attributes=None):
"""
Return a dictionary representation of this object.
:param exclude_attributes: Optional list of attributes to exclude.
:type exclude_attributes: ``list``
:rtype: ``dict``
"""
exclude_attributes = exclude_attributes or []
attributes = self.__dict__.keys()
attributes = [attr for attr in attributes if not attr.startswith('__') and
attr not in exclude_attributes]
result = {}
for attribute in attributes:
value = getattr(self, attribute, None)
result[attribute] = value
return result
@classmethod
def get_alias(cls):
return cls._alias if cls._alias else cls.__name__
@classmethod
def get_display_name(cls):
return cls._display_name if cls._display_name else cls.__name__
@classmethod
def get_plural_name(cls):
if not cls._plural:
raise Exception('The %s class is missing class attributes '
'in its definition.' % cls.__name__)
return cls._plural
@classmethod
def get_plural_display_name(cls):
return (cls._plural_display_name
if cls._plural_display_name
else cls._plural)
@classmethod
def get_url_path_name(cls):
if cls._url_path:
return cls._url_path
return cls.get_plural_name().lower()
def serialize(self):
return dict((k, v)
for k, v in six.iteritems(self.__dict__)
if not k.startswith('_'))
@classmethod
def deserialize(cls, doc):
if type(doc) is not dict:
doc = json.loads(doc)
return cls(**doc)
def __str__(self):
return str(self.__repr__())
def __repr__(self):
if not self._repr_attributes:
return super(Resource, self).__repr__()
attributes = []
for attribute in self._repr_attributes:
value = getattr(self, attribute, None)
attributes.append('%s=%s' % (attribute, value))
attributes = ','.join(attributes)
class_name = self.__class__.__name__
result = '<%s %s>' % (class_name, attributes)
return result
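# Illustrative sketch (not part of the original module, never invoked here).
# The 'ExampleRule' resource and its fields are made up for the example.
class _ExampleRule(Resource):
    _plural = 'ExampleRules'
    _repr_attributes = ['name']
def _example_resource_round_trip():
    # deserialize() accepts a dict or a JSON string; serialize() returns the
    # public attributes as a plain dict again
    rule = _ExampleRule.deserialize('{"name": "notify", "enabled": true}')
    return rule.serialize()  # -> {'name': 'notify', 'enabled': True}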
class ResourceManager(object):
def __init__(self, resource, endpoint, cacert=None, debug=False):
self.resource = resource
self.debug = debug
self.client = httpclient.HTTPClient(endpoint, cacert=cacert, debug=debug)
@staticmethod
def handle_error(response):
try:
content = response.json()
fault = content.get('faultstring', '') if content else ''
if fault:
response.reason += '\nMESSAGE: %s' % fault
except Exception as e:
response.reason += ('\nUnable to retrieve detailed message '
'from the HTTP response. %s\n' % str(e))
response.raise_for_status()
@add_auth_token_to_kwargs_from_env
def get_all(self, **kwargs):
# TODO: This is ugly, stop abusing kwargs
url = '/%s' % self.resource.get_url_path_name()
limit = kwargs.pop('limit', None)
pack = kwargs.pop('pack', None)
prefix = kwargs.pop('prefix', None)
params = {}
if limit and limit <= 0:
limit = None
if limit:
params['limit'] = limit
if pack:
params['pack'] = pack
if prefix:
params['prefix'] = prefix
response = self.client.get(url=url, params=params, **kwargs)
if response.status_code != 200:
self.handle_error(response)
return [self.resource.deserialize(item)
for item in response.json()]
@add_auth_token_to_kwargs_from_env
def get_by_id(self, id, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), id)
response = self.client.get(url, **kwargs)
if response.status_code == 404:
return None
if response.status_code != 200:
self.handle_error(response)
return self.resource.deserialize(response.json())
@add_auth_token_to_kwargs_from_env
def get_property(self, id_, property_name, self_deserialize=True, **kwargs):
"""
Gets a property of a Resource.
id_ : Id of the resource
property_name: Name of the property
self_deserialize: #Implies use the deserialize method implemented by this resource.
"""
token = None
if kwargs:
token = kwargs.pop('token', None)
url = '/%s/%s/%s/?%s' % (self.resource.get_url_path_name(), id_, property_name,
urllib.parse.urlencode(kwargs))
else:
url = '/%s/%s/%s/' % (self.resource.get_url_path_name(), id_, property_name)
response = self.client.get(url, token=token) if token else self.client.get(url)
if response.status_code == 404:
return None
if response.status_code != 200:
self.handle_error(response)
if self_deserialize:
return [self.resource.deserialize(item) for item in response.json()]
else:
return response.json()
@add_auth_token_to_kwargs_from_env
def get_by_ref_or_id(self, ref_or_id, **kwargs):
return self.get_by_id(id=ref_or_id, **kwargs)
@add_auth_token_to_kwargs_from_env
def query(self, **kwargs):
if not kwargs:
raise Exception('Query parameter is not provided.')
if 'limit' in kwargs and kwargs.get('limit') <= 0:
kwargs.pop('limit')
token = kwargs.get('token', None)
params = {}
for k, v in six.iteritems(kwargs):
if k != 'token':
params[k] = v
url = '/%s/?%s' % (self.resource.get_url_path_name(),
urllib.parse.urlencode(params))
response = self.client.get(url, token=token) if token else self.client.get(url)
if response.status_code == 404:
return []
if response.status_code != 200:
self.handle_error(response)
items = response.json()
instances = [self.resource.deserialize(item) for item in items]
return instances
@add_auth_token_to_kwargs_from_env
def get_by_name(self, name_or_id, **kwargs):
instances = self.query(name=name_or_id, **kwargs)
if not instances:
return None
else:
if len(instances) > 1:
                raise Exception('More than one %s named "%s" was found.' %
(self.resource.__name__.lower(), name_or_id))
return instances[0]
@add_auth_token_to_kwargs_from_env
def create(self, instance, **kwargs):
url = '/%s' % self.resource.get_url_path_name()
response = self.client.post(url, instance.serialize(), **kwargs)
if response.status_code != 200:
self.handle_error(response)
instance = self.resource.deserialize(response.json())
return instance
@add_auth_token_to_kwargs_from_env
def update(self, instance, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), instance.id)
response = self.client.put(url, instance.serialize(), **kwargs)
if response.status_code != 200:
self.handle_error(response)
instance = self.resource.deserialize(response.json())
return instance
@add_auth_token_to_kwargs_from_env
def delete(self, instance, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), instance.id)
response = self.client.delete(url, **kwargs)
if response.status_code not in [200, 204, 404]:
self.handle_error(response)
return False
return True
@add_auth_token_to_kwargs_from_env
def delete_by_id(self, instance_id, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), instance_id)
response = self.client.delete(url, **kwargs)
if response.status_code not in [200, 204, 404]:
self.handle_error(response)
return False
try:
resp_json = response.json()
if resp_json:
return resp_json
except:
pass
return True
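# Illustrative sketch (not part of the original module, never invoked here).
# The endpoint URL is a placeholder, and it reuses the made-up _ExampleRule
# resource defined above.
def _example_resource_manager_usage():
    mgr = ResourceManager(_ExampleRule, 'https://st2.example.com/api/v1')
    matching = mgr.query(name='notify', limit=10)   # GET with query parameters
    one = mgr.get_by_name('notify')                 # None if nothing matches
    if one is not None:
        mgr.delete(one)
    return matching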
class ActionAliasResourceManager(ResourceManager):
def __init__(self, resource, endpoint, cacert=None, debug=False):
endpoint = endpoint.replace('v1', 'exp')
self.resource = resource
self.debug = debug
self.client = httpclient.HTTPClient(root=endpoint, cacert=cacert, debug=debug)
class LiveActionResourceManager(ResourceManager):
@add_auth_token_to_kwargs_from_env
def re_run(self, execution_id, parameters=None, **kwargs):
url = '/%s/%s/re_run' % (self.resource.get_url_path_name(), execution_id)
data = {}
if parameters:
data['parameters'] = parameters
response = self.client.post(url, data, **kwargs)
if response.status_code != 200:
self.handle_error(response)
instance = self.resource.deserialize(response.json())
return instance
class TriggerInstanceResourceManager(ResourceManager):
@add_auth_token_to_kwargs_from_env
def re_emit(self, trigger_instance_id):
url = '/%s/%s/re_emit' % (self.resource.get_url_path_name(), trigger_instance_id)
response = self.client.post(url, None)
if response.status_code != 200:
self.handle_error(response)
return response.json()
| apache-2.0 | 1,653,460,699,954,385,700 | 33.187135 | 91 | 0.595621 | false | 4 | false | false | false |
lammps/lammps-packages | mingw-cross/cmake-win-on-linux.py | 1 | 14980 | #!/usr/bin/env python
# Script to build windows installer packages for LAMMPS
# (c) 2017,2018,2019,2020 Axel Kohlmeyer <[email protected]>
from __future__ import print_function
import sys,os,shutil,glob,re,subprocess,tarfile,gzip,time,inspect
try: from urllib.request import urlretrieve as geturl
except: from urllib import urlretrieve as geturl
try:
import multiprocessing
numcpus = multiprocessing.cpu_count()
except:
numcpus = 1
# helper functions
def error(str=None):
if not str: print(helpmsg)
else: print(sys.argv[0],"ERROR:",str)
sys.exit()
def getbool(arg,keyword):
if arg in ['yes','Yes','Y','y','on','1','True','true']:
return True
elif arg in ['no','No','N','n','off','0','False','false']:
return False
else:
error("Unknown %s option: %s" % (keyword,arg))
def fullpath(path):
return os.path.abspath(os.path.expanduser(path))
def getexe(url,name):
gzname = name + ".gz"
geturl(url,gzname)
with gzip.open(gzname,'rb') as gz_in:
with open(name,'wb') as f_out:
shutil.copyfileobj(gz_in,f_out)
gz_in.close()
f_out.close()
os.remove(gzname)
def system(cmd):
try:
txt = subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
except subprocess.CalledProcessError as e:
print("Command '%s' returned non-zero exit status" % e.cmd)
error(e.output.decode('UTF-8'))
return txt.decode('UTF-8')
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
# record location and name of python script
homedir, exename = os.path.split(os.path.abspath(inspect.getsourcefile(lambda:0)))
# default settings help message and default settings
bitflag = '64'
parflag = 'no'
pythonflag = False
thrflag = 'omp'
revflag = 'stable'
verbose = False
gitdir = os.path.join(homedir,"lammps")
adminflag = True
msixflag = False
helpmsg = """
Usage: python %s -b <bits> -j <cpus> -p <mpi> -t <thread> -y <yes|no> -r <rev> -v <yes|no> -g <folder> -a <yes|no>
Flags (all flags are optional, defaults listed below):
-b : select Windows variant (default value: %s)
-b 32 : build for 32-bit Windows
-b 64 : build for 64-bit Windows
-j : set number of CPUs for parallel make (default value: %d)
-j <num> : set to any reasonable number or 1 for serial make
-p : select message passing parallel build (default value: %s)
-p mpi : build an MPI parallel version with MPICH2 v1.4.1p1
-p no : build a serial version using MPI STUBS library
-t : select thread support (default value: %s)
-t omp : build with threads via OpenMP enabled
-t no : build with thread support disabled
-y : select python support (default value: %s)
-y yes : build with python included
-y no : build without python
-r : select LAMMPS source revision to build (default value: %s)
-r stable : download and build the latest stable LAMMPS version
-r unstable : download and build the latest patch release LAMMPS version
-r master : download and build the latest development snapshot
-r patch_<date> : download and build a specific patch release
-r <sha256> : download and build a specific snapshot version
-v : select output verbosity
-v yes : print progress messages and output of make commands
-v no : print only progress messages
-g : select folder with git checkout of LAMMPS sources
-g <folder> : use LAMMPS checkout in <folder> (default value: %s)
-a : select admin level installation (default value: yes)
-a yes : the created installer requires to be run at admin level
and LAMMPS is installed to be accessible by all users
-a no : the created installer runs without admin privilege and
LAMMPS is installed into the current user's appdata folder
-a msix : same as "no" but adjust for creating an MSIX package
Example:
python %s -r unstable -t omp -p mpi
""" % (exename,bitflag,numcpus,parflag,thrflag,pythonflag,revflag,gitdir,exename)
# parse arguments
argv = sys.argv
argc = len(argv)
i = 1
while i < argc:
if i+1 >= argc:
print("\nMissing argument to flag:",argv[i])
error()
if argv[i] == '-b':
bitflag = argv[i+1]
elif argv[i] == '-j':
numcpus = int(argv[i+1])
elif argv[i] == '-p':
parflag = argv[i+1]
elif argv[i] == '-t':
thrflag = argv[i+1]
elif argv[i] == '-y':
pythonflag = getbool(argv[i+1],"python")
elif argv[i] == '-r':
revflag = argv[i+1]
elif argv[i] == '-v':
verbose = getbool(argv[i+1],"verbose")
elif argv[i] == '-a':
if argv[i+1] in ['msix','MSIX']:
adminflag = False
msixflag = True
else:
msixflag = False
adminflag = getbool(argv[i+1],"admin")
elif argv[i] == '-g':
gitdir = fullpath(argv[i+1])
else:
print("\nUnknown flag:",argv[i])
error()
i+=2
# checks
if bitflag != '32' and bitflag != '64':
error("Unsupported bitness flag %s" % bitflag)
if parflag != 'no' and parflag != 'mpi':
error("Unsupported parallel flag %s" % parflag)
if thrflag != 'no' and thrflag != 'omp':
error("Unsupported threading flag %s" % thrflag)
# test for valid revision name format: branch names, release tags, or commit hashes
rev1 = re.compile("^(stable|unstable|master)$")
rev2 = re.compile(r"^(patch|stable)_\d+(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\d{4}$")
rev3 = re.compile(r"^[a-f0-9]{40}$")
if not rev1.match(revflag) and not rev2.match(revflag) and not rev3.match(revflag):
error("Unsupported revision flag %s" % revflag)
# create working directory
if adminflag:
builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s" % (bitflag,parflag,thrflag,revflag))
else:
if pythonflag:
builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s-python" % (bitflag,parflag,thrflag,revflag))
elif msixflag:
builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s-msix" % (bitflag,parflag,thrflag,revflag))
else:
builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s-noadmin" % (bitflag,parflag,thrflag,revflag))
shutil.rmtree(builddir,True)
try:
os.mkdir(builddir)
except:
error("Cannot create temporary build folder: %s" % builddir)
# check for prerequisites and set up build environment
if bitflag == '32':
cc_cmd = which('i686-w64-mingw32-gcc')
cxx_cmd = which('i686-w64-mingw32-g++')
fc_cmd = which('i686-w64-mingw32-gfortran')
ar_cmd = which('i686-w64-mingw32-ar')
size_cmd = which('i686-w64-mingw32-size')
nsis_cmd = which('makensis')
lmp_size = 'smallsmall'
else:
cc_cmd = which('x86_64-w64-mingw32-gcc')
cxx_cmd = which('x86_64-w64-mingw32-g++')
fc_cmd = which('x86_64-w64-mingw32-gfortran')
ar_cmd = which('x86_64-w64-mingw32-ar')
size_cmd = which('x86_64-w64-mingw32-size')
nsis_cmd = which('makensis')
lmp_size = 'smallbig'
print("""
Settings: building LAMMPS revision %s for %s-bit Windows
Message passing : %s
Multi-threading : %s
Home folder : %s
Source folder : %s
Build folder : %s
C compiler : %s
C++ compiler : %s
Fortran compiler : %s
Library archiver : %s
""" % (revflag,bitflag,parflag,thrflag,homedir,gitdir,builddir,cc_cmd,cxx_cmd,fc_cmd,ar_cmd))
# create/update git checkout
if not os.path.exists(gitdir):
txt = system("git clone https://github.com/lammps/lammps.git %s" % gitdir)
if verbose: print(txt)
os.chdir(gitdir)
txt = system("git fetch origin")
if verbose: print(txt)
txt = system("git checkout %s" % revflag)
if verbose: print(txt)
if revflag == "master" or revflag == "stable" or revflag == "unstable":
txt = system("git pull")
if verbose: print(txt)
# switch to build folder
os.chdir(builddir)
# download what is not automatically downloaded by CMake
print("Downloading third party tools")
url='http://download.lammps.org/thirdparty'
print("FFMpeg")
getexe("%s/ffmpeg-win%s.exe.gz" % (url,bitflag),"ffmpeg.exe")
print("gzip")
getexe("%s/gzip.exe.gz" % url,"gzip.exe")
if parflag == "mpi":
mpiflag = "on"
else:
mpiflag = "off"
if thrflag == "omp":
ompflag = "on"
else:
ompflag = "off"
print("Configuring build with CMake")
cmd = "mingw%s-cmake -G Ninja -D CMAKE_BUILD_TYPE=Release" % bitflag
cmd += " -D ADD_PKG_CONFIG_PATH=%s/mingw%s-pkgconfig" % (homedir,bitflag)
cmd += " -C %s/mingw%s-pkgconfig/addpkg.cmake" % (homedir,bitflag)
cmd += " -C %s/cmake/presets/mingw-cross.cmake %s/cmake" % (gitdir,gitdir)
cmd += " -DBUILD_SHARED_LIBS=on -DBUILD_MPI=%s -DBUILD_OPENMP=%s" % (mpiflag,ompflag)
cmd += " -DWITH_GZIP=on -DWITH_FFMPEG=on -DLAMMPS_EXCEPTIONS=on"
cmd += " -DINTEL_LRT_MODE=c++11 -DBUILD_LAMMPS_SHELL=on"
cmd += " -DCMAKE_CXX_COMPILER_LAUNCHER=ccache"
if pythonflag: cmd += " -DPKG_PYTHON=yes"
print("Running: ",cmd)
txt = system(cmd)
if verbose: print(txt)
print("Compiling")
system("ninja")
print("Done")
print("Building PDF manual")
os.chdir(os.path.join(gitdir,"doc"))
txt = system("make pdf")
if verbose: print(txt)
shutil.move("Manual.pdf",os.path.join(builddir,"LAMMPS-Manual.pdf"))
print("Done")
# switch back to build folder and copy/process files for inclusion in installer
print("Collect and convert files for the Installer package")
os.chdir(builddir)
shutil.copytree(os.path.join(gitdir,"examples"),os.path.join(builddir,"examples"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"bench"),os.path.join(builddir,"bench"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"tools"),os.path.join(builddir,"tools"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"python","lammps"),os.path.join(builddir,"python","lammps"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"potentials"),os.path.join(builddir,"potentials"),symlinks=False)
shutil.copy(os.path.join(gitdir,"README"),os.path.join(builddir,"README.txt"))
shutil.copy(os.path.join(gitdir,"LICENSE"),os.path.join(builddir,"LICENSE.txt"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","colvars-refman-lammps.pdf"),os.path.join(builddir,"Colvars-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"tools","createatoms","Manual.pdf"),os.path.join(builddir,"CreateAtoms-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","kspace.pdf"),os.path.join(builddir,"Kspace-Extra-Info.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","pair_gayberne_extra.pdf"),os.path.join(builddir,"PairGayBerne-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","pair_resquared_extra.pdf"),os.path.join(builddir,"PairReSquared-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","PDLammps_overview.pdf"),os.path.join(builddir,"PDLAMMPS-Overview.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","PDLammps_EPS.pdf"),os.path.join(builddir,"PDLAMMPS-EPS.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","PDLammps_VES.pdf"),os.path.join(builddir,"PDLAMMPS-VES.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","SPH_LAMMPS_userguide.pdf"),os.path.join(builddir,"SPH-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","SMD_LAMMPS_userguide.pdf"),os.path.join(builddir,"SMD-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","USER-CGDNA.pdf"),os.path.join(builddir,"CGDNA-Manual.pdf"))
# prune outdated inputs, too large files, or examples of packages we don't bundle
for d in ['accelerate','kim','mscg','USER/quip','USER/vtk']:
shutil.rmtree(os.path.join("examples",d),True)
for d in ['FERMI','KEPLER']:
shutil.rmtree(os.path.join("bench",d),True)
shutil.rmtree("tools/msi2lmp/test",True)
os.remove("potentials/C_10_10.mesocnt")
os.remove("potentials/TABTP_10_10.mesont")
os.remove("examples/USER/mesont/C_10_10.mesocnt")
os.remove("examples/USER/mesont/TABTP_10_10.mesont")
# convert text files to CR-LF conventions
txt = system("unix2dos LICENSE.txt README.txt tools/msi2lmp/README")
if verbose: print(txt)
txt = system("find bench examples potentials python tools/msi2lmp/frc_files -type f -print | xargs unix2dos")
if verbose: print(txt)
# mass rename README to README.txt
txt = system('for f in $(find tools bench examples potentials python -name README -print); do mv -v $f $f.txt; done')
if verbose: print(txt)
# mass rename in.<name> to in.<name>.lmp
txt = system('for f in $(find bench examples -name in.\* -print); do mv -v $f $f.lmp; done')
if verbose: print(txt)
print("Done")
print("Configuring and building installer")
os.chdir(builddir)
if pythonflag:
nsisfile = os.path.join(homedir,"installer","lammps-python.nsis")
elif adminflag:
nsisfile = os.path.join(homedir,"installer","lammps-admin.nsis")
else:
if msixflag:
nsisfile = os.path.join(homedir,"installer","lammps-msix.nsis")
else:
nsisfile = os.path.join(homedir,"installer","lammps-noadmin.nsis")
shutil.copy(nsisfile,os.path.join(builddir,"lammps.nsis"))
shutil.copy(os.path.join(homedir,"installer","FileAssociation.nsh"),os.path.join(builddir,"FileAssociation.nsh"))
shutil.copy(os.path.join(homedir,"installer","lammps.ico"),os.path.join(builddir,"lammps.ico"))
shutil.copy(os.path.join(homedir,"installer","lammps-text-logo-wide.bmp"),os.path.join(builddir,"lammps-text-logo-wide.bmp"))
shutil.copytree(os.path.join(homedir,"installer","envvar"),os.path.join(builddir,"envvar"),symlinks=False)
# define version flag of the installer:
# - use current timestamp, when pulling from master (for daily builds)
# - parse version from src/version.h when pulling from stable, unstable, or specific tag
# - otherwise use revflag, i.e. the commit hash
version = revflag
if revflag == 'stable' or revflag == 'unstable' or rev2.match(revflag):
with open(os.path.join(gitdir,"src","version.h"),'r') as v_file:
verexp = re.compile(r'^.*"(\w+) (\w+) (\w+)".*$')
vertxt = v_file.readline()
verseq = verexp.match(vertxt).groups()
version = "".join(verseq)
elif revflag == 'master':
version = time.strftime('%Y-%m-%d')
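# For illustration (not part of the original script): src/version.h carries a
# line of the form '#define LAMMPS_VERSION "29 Oct 2020"', so the regex above
# joins the three quoted words into a version string such as '29Oct2020'.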
if bitflag == '32':
mingwdir = '/usr/i686-w64-mingw32/sys-root/mingw/bin/'
elif bitflag == '64':
mingwdir = '/usr/x86_64-w64-mingw32/sys-root/mingw/bin/'
if parflag == 'mpi':
txt = system("makensis -DMINGW=%s -DVERSION=%s-MPI -DBIT=%s -DLMPREV=%s lammps.nsis" % (mingwdir,version,bitflag,revflag))
if verbose: print(txt)
else:
txt = system("makensis -DMINGW=%s -DVERSION=%s -DBIT=%s -DLMPREV=%s lammps.nsis" % (mingwdir,version,bitflag,revflag))
if verbose: print(txt)
# clean up after successful build
os.chdir('..')
print("Cleaning up...")
shutil.rmtree(builddir,True)
print("Done.")
| mit | -3,950,353,748,060,748,000 | 38.21466 | 128 | 0.672029 | false | 2.959889 | false | false | false |
Kopachris/seshet | seshet/bot.py | 1 | 18891 | """Implement SeshetBot as subclass of ircutils3.bot.SimpleBot."""
import logging
import os
from io import StringIO
from datetime import datetime
from ircutils3 import bot, client
from .utils import KVStore, Storage, IRCstr
class SeshetUser(object):
"""Represent one IRC user."""
def __init__(self, nick, user, host):
logging.debug("Building new SeshetUser, %s", nick)
self.nick = IRCstr(nick)
self.user = user
self.host = host
self.channels = []
def join(self, channel):
"""Add this user to the channel's user list and add the channel to this
user's list of joined channels.
"""
if channel not in self.channels:
channel.users.add(self.nick)
self.channels.append(channel)
def part(self, channel):
"""Remove this user from the channel's user list and remove the channel
from this user's list of joined channels.
"""
if channel in self.channels:
channel.users.remove(self.nick)
self.channels.remove(channel)
def quit(self):
"""Remove this user from all channels and reinitialize the user's list
of joined channels.
"""
for c in self.channels:
c.users.remove(self.nick)
self.channels = []
def change_nick(self, nick):
"""Update this user's nick in all joined channels."""
old_nick = self.nick
self.nick = IRCstr(nick)
for c in self.channels:
c.users.remove(old_nick)
c.users.add(self.nick)
def __str__(self):
return "{}!{}@{}".format(self.nick, self.user, self.host)
def __repr__(self):
temp = "<SeshetUser {}!{}@{} in channels {}>"
return temp.format(self.nick, self.user, self.host, self.channels)
class SeshetChannel(object):
"""Represent one IRC channel."""
def __init__(self, name, users, log_size=100):
self.name = IRCstr(name)
self.users = users
self.message_log = []
self._log_size = log_size
def log_message(self, user, message):
"""Log a channel message.
This log acts as a sort of cache so that recent activity can be searched
by the bot and command modules without querying the database.
"""
if isinstance(user, SeshetUser):
user = user.nick
elif not isinstance(user, IRCstr):
user = IRCstr(user)
time = datetime.utcnow()
self.message_log.append((time, user, message))
while len(self.message_log) > self._log_size:
del self.message_log[0]
def __str__(self):
return str(self.name)
def __repr__(self):
temp = "<SeshetChannel {} with {} users>"
return temp.format(self.name, len(self.users))
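# Illustrative sketch (not part of the original module, never invoked here).
# Nick, hostname and channel name are made up; log_size=2 keeps it short.
def _example_channel_and_user():
    dev = SeshetChannel('#dev', set(), log_size=2)
    alice = SeshetUser('Alice', 'alice', 'host.example')
    alice.join(dev)                     # dev.users gains 'Alice'
    dev.log_message(alice, 'first')
    dev.log_message(alice, 'second')
    dev.log_message(alice, 'third')     # oldest entry dropped, cache stays at 2
    alice.part(dev)                     # both sides are cleaned up again
    return dev.message_log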
class SeshetBot(bot.SimpleBot):
"""Extend `ircutils3.bot.SimpleBot`.
Each instance represents one bot, connected to one IRC network.
Each instance should have its own database, but can make use of
any shared command modules. The modules may have to be added to
the bot's database if the bot wasn't created using the
`seshet --config` or `seshet --new` commands.
"""
def __init__(self, nick='Seshet', db=None, debug_file=None, verbosity=99):
"""Extend `ircutils3.bot.SimpleBot.__init__()`.
Keyword argument `db` is required for running commands other
than core commands and should be an instance of pydal.DAL.
"""
# initialize debug logging
if debug_file is None:
logging.basicConfig(level=verbosity)
else:
logging.basicConfig(filename=os.path.expanduser(debug_file),
level=verbosity
)
logging.debug("Running `SimpleBot.__init__`...")
bot.SimpleBot.__init__(self, nick, auto_handle=False)
# define defaults
self.session = Storage()
self.log_file = 'seshet.log'
self.log_formats = {}
self.locale = {}
self.channels = {}
self.users = {}
if db is None:
# no database connection, only log to file and run
# core command modules
logging.info("No db, IRC logging will be done to file")
self.log = self._log_to_file
self.run_modules = self._run_only_core
# dummy KV store since no db
self.storage = Storage()
else:
logging.info("Using database %s", db)
self.db = db
self.storage = KVStore(db)
# Add default handlers
logging.debug("Adding default handlers...")
self.events["any"].add_handler(client._update_client_info)
self.events["ctcp_version"].add_handler(client._reply_to_ctcp_version)
self.events["name_reply"].add_handler(_add_channel_names)
def log(self, etype, source, msg='', target='', hostmask='', params=''):
"""Log an event in the database.
Required:
`etype` - event type. One of 'PRIVMSG', 'QUIT', 'PART', 'ACTION',
'NICK', 'JOIN', 'MODE', 'KICK', 'CTCP', or 'ERROR'. Enforced
by database model.
`source` - source of the event. Usually a user. For NICK events,
the user's original nickname. For ERROR events, this should be
the exception name, the module name, and the line number. The
full traceback will be logged in `msg`.
Optional:
`msg` - a message associated with the event.
`target` - the target the message was directed to. For MODE and KICK
events, this will be the user the event was performed on. For
NICK events, this will be channel the event was seen in (an event
will be created for each channel the user is seen by the bot in).
`hostmask` - a hostmask associated with the event.
        `params` - any additional parameters associated with the event, such as
a new nickname (for NICK events), mode switches (for MODE events),
or a dump of local variables (for ERROR events).
"""
self.db.event_log.insert(event_type=etype,
event_time=datetime.utcnow(),
source=source,
target=target,
message=msg,
host=hostmask,
params=params,
)
self.db.commit()
def run_modules(self, e):
# grab local pointer to self.db for faster lookup
db = self.db
# get initial list of modules handling this event type
event_types = db.modules.event_types
mod_enabled = db.modules.enabled
init_mods = db(event_types.contains(e.command) & mod_enabled).select()
logging.debug(("Running modules for {} command. "
"Initial module list:\n{}").format(e.command, init_mods)
)
if e.command in ('PRIVMSG', 'CTCP_ACTION', 'NOTICE'):
# narrow down list of modules to run based on event parameters
# lowercase for non-caps comparisons
m_low = e.message.lower()
bot_n = self.nickname.lower()
bot_u = self.user.lower()
bot_r = self.real_name.lower()
# indicates whether or not name has already been stripped from
# original message
for_us = False
if e.target.startswith('#'):
chan_msg = True
chan_nicks = self.channels[e.target].users
else:
chan_msg = False
fin_mods = list() # final list of modules to run
for mod in init_mods:
                if e.source in mod.whitelist:
                    fin_mods.append(mod)
                    continue
                elif e.source in mod.blacklist:
                    # a blacklisted source skips this module entirely
                    continue
if self.nickname in mod.enicks:
if e.target == self.nickname or for_us:
fin_mods.append(mod)
elif m_low.startswith(bot_n):
# strip nickname from original message so modules can
# process it correctly
e.message = e.message[len(bot_n):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_u):
e.message = e.message[len(bot_u):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_r):
e.message = e.message[len(bot_r):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
if chan_msg:
if e.target in mod.dchannels:
pass
elif set(mod.dnicks) & chan_nicks:
pass
elif e.target in mod.echannels:
fin_mods.append(mod)
elif set(mod.enicks) & chan_nicks:
fin_mods.append(mod)
argv = m_low.split()
for mod in fin_mods:
# run each module
m = __import__(mod.name) # TODO: use importlib
# TODO: add authentication and rate limiting
for cmd, fun in m.commands.items():
if (mod.cmd_prefix + cmd) == argv[0]:
fun(self, e)
break
def get_unique_users(self, chan):
"""Get the set of users that are unique to the given channel (i.e. not
present in any other channel the bot is in).
"""
chan = IRCstr(chan)
these_users = self.channels[chan].users
other_users = set()
for c in self.channels.values():
if c.name != chan:
other_users |= c.users
return these_users - other_users
def on_message(self, e):
self.log('privmsg',
source=e.source,
msg=e.message,
target=e.target,
)
if e.target in self.channels:
# TODO: move this to self.log() so we don't have to get time twice?
self.channels[e.target].log_message(e.source, e.message)
self.run_modules(e)
def on_join(self, e):
self.log('join',
source=e.source,
target=e.target,
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
if e.source != self.nickname:
if nick not in self.users:
self.users[nick] = SeshetUser(nick, e.user, e.host)
self.users[nick].join(self.channels[chan])
self.run_modules(e)
def on_part(self, e):
self.log('part',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params[1:]),
target=e.target,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
            for u in list(self.users.values()):  # copy; entries may be deleted
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_quit(self, e):
nick = IRCstr(e.source)
for chan in self.channels.values():
if nick in chan.users:
self.log('quit',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params),
target=chan.name,
)
self.users[nick].quit()
del self.users[nick]
def on_disconnect(self, e):
pass
def on_kick(self, e):
self.log('kick',
source=e.source,
target=e.target,
params=e.params[0],
msg=' '.join(e.params[1:]),
hostmask=e.user+'@'+e.host,
)
        chan = IRCstr(e.target)
        # e.source is the user who performed the kick; the nick of the user
        # who was kicked is carried in the first event parameter
        nick = IRCstr(e.params[0])
        channel = self.channels[chan]
        user = self.users[nick]
        user.part(channel)
        if nick == self.nickname:
            # bot was kicked; remove that channel from all users and
            # remove any users with empty channel lists
            for u in list(self.users.values()):  # copy; entries may be deleted
                if channel in u.channels:
                    u.channels.remove(channel)
                if len(u.channels) == 0:
                    del self.users[u.nick]
def on_nick_change(self, e):
new_nick = IRCstr(e.target)
old_nick = IRCstr(e.source)
for chan in self.channels.values():
            if old_nick in chan.users:
self.log('nick',
source=e.source,
hostmask=e.user+'@'+e.host,
target=chan.name,
params=e.target,
)
self.users[old_nick].change_nick(new_nick)
self.users[new_nick] = self.users[old_nick]
del self.users[old_nick]
def on_ctcp_action(self, e):
self.log('action',
source=e.source,
target=e.target,
msg=' '.join(e.params),
)
def on_welcome(self, e):
pass
def on_mode(self, e):
self.log('mode',
source=e.source,
msg=' '.join(e.params),
target=e.target,
)
def before_poll(self):
"""Called each loop before polling sockets for I/O."""
pass
def after_poll(self):
"""Called each loop after polling sockets for I/O and
handling any queued events.
"""
pass
def connect(self, *args, **kwargs):
"""Extend `client.SimpleClient.connect()` with defaults"""
defaults = {}
for i, k in enumerate(('host', 'port', 'channel', 'use_ssl', 'password')):
if i < len(args):
defaults[k] = args[i]
elif k in kwargs:
defaults[k] = kwargs[k]
else:
def_k = 'default_' + k
defaults[k] = getattr(self, def_k, None)
if defaults['use_ssl'] is None:
defaults['use_ssl'] = False
if defaults['host'] is None:
raise TypeError("missing 1 required positional argument: 'host'")
logging.info("Connecting to %s:%s and joining channels %s",
defaults['host'],
defaults['port'],
defaults['channel'],
)
client.SimpleClient.connect(self, **defaults)
def start(self):
logging.debug("Beginning poll loop")
self._loop(self.conn._map)
def _log_to_file(self, etype, source, msg='', target='', hostmask='', params=''):
"""Override `log()` if bot is not initialized with a database
connection. Do not call this method directly.
"""
today = datetime.utcnow()
# TODO: Use self.locale['timezone'] for changing time
date = today.strftime(self.locale['date_fmt'])
time = today.strftime(self.locale['time_fmt'])
datetime_s = today.strftime(self.locale['short_datetime_fmt'])
datetime_l = today.strftime(self.locale['long_datetime_fmt'])
if target == self.nickname and etype in ('privmsg', 'action'):
target = source
if etype in self.log_formats:
file_path = os.path.expanduser(self.log_file.format(**locals()))
file_dir = os.path.dirname(file_path)
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
line = self.log_formats[etype].format(**locals())
with open(file_path, 'a') as log:
log.write(line+'\n')
# else do nothing
def _run_only_core(self, *args, **kwargs):
"""Override `_run_commands()` if bot is not initialized with a
database connection. Do not call this method directly.
Rather than getting a list of enabled modules from the database,
Seshet will only run the commands defined by `core` in this package.
The bot will only run commands given in private message ("query")
by either an authenticated user defined in the instance's config file,
or by any user with the same hostmask if authentication isn't set up
in the instance's config file.
The `core` command module from this package can only be overridden if
the bot is initialized with a database connection and a new `core`
module is entered into the database.
"""
pass
def _loop(self, map):
"""The main loop. Poll sockets for I/O and run any other functions
that need to be run every loop.
"""
try:
from asyncore import poll
except ImportError:
raise Exception("Couldn't find poll function. Cannot start bot.")
while map:
self.before_poll()
poll(timeout=30.0, map=map)
self.after_poll()
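# Illustrative sketch (not part of the original module, never invoked here).
# Host, port, channel and nick are made-up values; a production setup would
# also pass a pydal DAL instance as ``db`` so events are logged to the database.
def _example_bot_setup():
    seshet = SeshetBot(nick='Seshet')
    seshet.connect('irc.example.net', 6667, '#dev')
    seshet.start()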
def _add_channel_names(client, e):
"""Add a new channel to self.channels and initialize its user list.
Called as event handler for RPL_NAMES events. Do not call directly.
"""
chan = IRCstr(e.channel)
names = set([IRCstr(n) for n in e.name_list])
    client.channels[chan] = SeshetChannel(chan, names)
| bsd-3-clause | 1,934,334,764,152,155,100 | 34.984762 | 85 | 0.505214 | false | 4.44285 | false | false | false
Naeka/vosae-app | www/organizer/api/resources/event.py | 1 | 10316 | # -*- coding:Utf-8 -*-
from django.conf.urls import url
from django.core.exceptions import ObjectDoesNotExist
from tastypie import fields as base_fields, http
from tastypie.utils import trailing_slash
from tastypie.validation import Validation
from tastypie_mongoengine import fields
from dateutil.parser import parse
from core.api.utils import TenantResource
from organizer.models import VosaeEvent, DATERANGE_FILTERS
from organizer.api.doc import HELP_TEXT
__all__ = (
'VosaeEventResource',
)
class EventValidation(Validation):
def is_valid(self, bundle, request=None):
from django.utils.timezone import is_naive
errors = {}
for field in ['start', 'end']:
data = bundle.data.get(field)
if not data.get('date', None) and not data.get('datetime', None):
errors['__all__'] = ["One of 'date' and 'datetime' must be set."]
elif data.get('date', None) and data.get('datetime', None):
errors['__all__'] = ["Only one of 'date' and 'datetime' must be set. The 'date' field is used for all-day events."]
elif data.get('datetime', None) and is_naive(parse(data.get('datetime'))) and not data.get('timezone', None):
errors['datetime'] = ["A timezone offset is required if not specified in the 'timezone' field"]
return errors
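# Illustrative sketch (not part of the original module, never invoked here);
# the field values are made up. Exactly one of 'date'/'datetime' must be given
# per bound, and a naive 'datetime' additionally needs a 'timezone'.
def _example_event_payloads():
    accepted = {'start': {'date': '2013-07-01'},
                'end': {'date': '2013-07-02'}}
    rejected = {'start': {'datetime': '2013-07-01T10:00:00'},  # naive, no timezone given
                'end': {'date': '2013-07-01'}}
    return accepted, rejected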
class VosaeEventResource(TenantResource):
status = base_fields.CharField(
attribute='status',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['status']
)
created_at = base_fields.DateTimeField(
attribute='created_at',
readonly=True,
help_text=HELP_TEXT['vosae_event']['created_at']
)
updated_at = base_fields.DateTimeField(
attribute='updated_at',
readonly=True,
help_text=HELP_TEXT['vosae_event']['updated_at']
)
summary = base_fields.CharField(
attribute='summary',
help_text=HELP_TEXT['vosae_event']['summary']
)
description = base_fields.CharField(
attribute='description',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['description']
)
location = base_fields.CharField(
attribute='location',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['location']
)
color = base_fields.CharField(
attribute='color',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['color']
)
start = fields.EmbeddedDocumentField(
embedded='organizer.api.resources.EventDateTimeResource',
attribute='start',
help_text=HELP_TEXT['vosae_event']['start']
)
end = fields.EmbeddedDocumentField(
embedded='organizer.api.resources.EventDateTimeResource',
attribute='end',
help_text=HELP_TEXT['vosae_event']['end']
)
recurrence = base_fields.CharField(
attribute='recurrence',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['recurrence']
)
original_start = fields.EmbeddedDocumentField(
embedded='organizer.api.resources.EventDateTimeResource',
attribute='original_start',
readonly=True,
help_text=HELP_TEXT['vosae_event']['original_start']
)
instance_id = base_fields.CharField(
attribute='instance_id',
readonly=True,
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['instance_id']
)
transparency = base_fields.CharField(
attribute='transparency',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['transparency']
)
calendar = fields.ReferenceField(
to='organizer.api.resources.VosaeCalendarResource',
attribute='calendar',
help_text=HELP_TEXT['vosae_event']['calendar']
)
creator = fields.ReferenceField(
to='core.api.resources.VosaeUserResource',
attribute='creator',
readonly=True,
help_text=HELP_TEXT['vosae_event']['creator']
)
organizer = fields.ReferenceField(
to='core.api.resources.VosaeUserResource',
attribute='organizer',
readonly=True,
help_text=HELP_TEXT['vosae_event']['organizer']
)
attendees = fields.EmbeddedListField(
of='organizer.api.resources.AttendeeResource',
attribute='attendees',
null=True,
blank=True,
full=True,
help_text=HELP_TEXT['vosae_event']['attendees']
)
reminders = fields.EmbeddedDocumentField(
embedded='organizer.api.resources.ReminderSettingsResource',
attribute='reminders',
blank=True,
help_text=HELP_TEXT['vosae_event']['reminders']
)
class Meta(TenantResource.Meta):
resource_name = 'vosae_event'
queryset = VosaeEvent.objects.all()
excludes = ('tenant', 'occurrences', 'next_reminder', 'ical_uid', 'ical_data')
filtering = {
'start': ('exact', 'gt', 'gte'),
'end': ('exact', 'lt', 'lte'),
'calendar': ('exact')
}
validation = EventValidation()
def prepend_urls(self):
"""Add urls for resources actions."""
urls = super(VosaeEventResource, self).prepend_urls()
urls.extend((
url(r'^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/instances%s$' % (self._meta.resource_name, trailing_slash()), self.wrap_view('event_instances'), name='api_vosae_event_instances'),
))
return urls
def build_filters(self, filters=None):
qs_filters = super(VosaeEventResource, self).build_filters(filters)
        # iterate over a copy of the items since keys are added and removed below
        for filter_name, filter_value in qs_filters.items():
if filter_name.endswith('__exact'):
new_name = filter_name[:filter_name.index('__exact')]
qs_filters[new_name] = filter_value
del qs_filters[filter_name]
filter_name = new_name
if filter_name in DATERANGE_FILTERS:
if isinstance(filter_value, basestring):
qs_filters[filter_name] = parse(filter_value)
return qs_filters
def get_object_list(self, request):
"""Filters events based on calendar accesses (extracted from request user)"""
from organizer.models import VosaeCalendar
object_list = super(VosaeEventResource, self).get_object_list(request)
principals = [request.vosae_user] + request.vosae_user.groups
calendars = VosaeCalendar.objects.filter(acl__read_list__in=principals, acl__negate_list__nin=principals)
return object_list.filter(calendar__in=list(calendars))
def apply_filters(self, request, applicable_filters):
object_list = super(VosaeEventResource, self).apply_filters(request, applicable_filters)
filters = request.GET
if 'single_events' in filters and filters['single_events'] in ['true', 'True', True]:
start = None
end = None
for filter_name, filter_value in filters.iteritems():
try:
if filter_name.startswith('start'):
start = parse(filter_value)
elif filter_name.startswith('end'):
end = parse(filter_value)
except:
pass
return object_list.with_instances(start, end)
return object_list
def event_instances(self, request, **kwargs):
"""List all instances of the event"""
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
try:
bundle = self.build_bundle(request=request)
objects = self.obj_get_list(bundle, **self.remove_api_resource_names(kwargs)).with_instances()
except ObjectDoesNotExist:
return http.HttpNotFound()
if objects.count() < 2:
return http.HttpNotFound()
sorted_objects = self.apply_sorting(objects, options=request.GET)
first_objects_bundle = self.build_bundle(obj=objects[0], request=request)
instances_resource_uri = '%sinstances/' % self.get_resource_uri(first_objects_bundle)
paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=instances_resource_uri, limit=self._meta.limit)
to_be_serialized = paginator.page()
# Dehydrate the bundles in preparation for serialization.
bundles = [self.build_bundle(obj=obj, request=request) for obj in to_be_serialized['objects']]
to_be_serialized['objects'] = [self.full_dehydrate(b) for b in bundles]
to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
return self.create_response(request, to_be_serialized)
def full_hydrate(self, bundle):
"""Set event's creator and organizer"""
bundle = super(VosaeEventResource, self).full_hydrate(bundle)
bundle.obj.creator = bundle.request.vosae_user
# Organizer should be the user owner of the calendar
try:
organizer = bundle.obj.calendar.acl.get_owner()
except:
organizer = bundle.request.vosae_user
bundle.obj.organizer = organizer
return bundle
def full_dehydrate(self, bundle, for_list=False):
bundle = super(VosaeEventResource, self).full_dehydrate(bundle, for_list=for_list)
if not bundle.data['instance_id']:
del bundle.data['instance_id']
return bundle
def dehydrate(self, bundle):
"""Dehydrates the appropriate CalendarList which differs according to user (extracted from request)"""
from organizer.models import CalendarList
from organizer.api.resources import CalendarListResource
bundle = super(VosaeEventResource, self).dehydrate(bundle)
calendar_list = CalendarList.objects.get(calendar=bundle.obj.calendar, vosae_user=bundle.request.vosae_user)
calendar_list_resource = CalendarListResource()
calendar_list_resource_bundle = calendar_list_resource.build_bundle(obj=calendar_list, request=bundle.request)
bundle.data['calendar_list'] = calendar_list_resource.get_resource_uri(calendar_list_resource_bundle)
return bundle
| agpl-3.0 | -4,812,843,896,607,923,000 | 38.830116 | 190 | 0.630186 | false | 3.926913 | false | false | false |
m-tmatma/svnmailer | src/lib/svnmailer/settings.py | 1 | 19703 | # -*- coding: utf-8 -*-
#
# Copyright 2004-2006 André Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Runtime settings for the svnmailer
==================================
This module defines one public class, called L{Settings}. This class is the
storage container for all settings used by the svnmailer. L{Settings} is an
abstract class. There is just one method that must be implemented --
L{Settings.init}. This method is responsible for filling the container
properly. An implementor of the L{Settings} class can be found in the
L{svnmailer.config} module.
This module further defines the Settings subcontainers
L{GroupSettingsContainer}, L{GeneralSettingsContainer} and
L{RuntimeSettingsContainer}, but you should not instantiate them directly --
L{Settings} provides methods that return instances of these containers.
"""
__author__ = "André Malo"
__docformat__ = "epytext en"
__all__ = ['Settings', 'modes']
# global imports
from svnmailer import typedstruct, struct_accessors
class _Tokens(object):
""" Generic token container
@ivar valid_tokens: The valid mode tokens (str, str, ...)
@type valid_tokens: C{tuple}
"""
valid_tokens = ()
def __init__(self, *args):
""" Initialization """
self.valid_tokens = args
for token in self.valid_tokens:
setattr(self, token.encode('us-ascii'), token)
modes = _Tokens('commit', 'propchange', 'lock', 'unlock')
xpath = _Tokens(u'yes', u'no', u'ignore')
showenc = _Tokens(u'yes', u'no', u'nondefault')
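# For illustration (not part of the original module): each valid token becomes
# an attribute whose value is the token itself, e.g. modes.commit == 'commit'
# and xpath.ignore == u'ignore'; modes.valid_tokens lists all four mode names.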
def groupMembers(space):
""" Define the members of the group settings
The following members are defined:
- C{_name}: Name of the group
- C{_def_for_repos}: default for_repos regex
- C{_def_for_paths}: default for_paths regex
- C{for_repos}: Repository regex
- C{for_paths}: Path regex (inside the repos)
- C{exclude_paths}: Exclude path regex to prevent for_paths from
being applied
- C{ignore_if_other_matches}: this group will be ignored if there
are any other groups selected for a particular path
- C{show_nonmatching_paths}: How to deal with paths that are not
matched by the group
- C{commit_subject_template}: Subject template for commit mail
- C{propchange_subject_template}: Subject template for revpropchanges
- C{lock_subject_template}: Subject template for locks
- C{unlock_subject_template}: Subject template for unlocks
- C{commit_subject_prefix}: Subject prefix for commit mail
- C{propchange_subject_prefix}: Subject prefix for revpropchanges
- C{lock_subject_prefix}: Subject prefix for locks
- C{unlock_subject_prefix}: Subject prefix for unlocks
- C{max_subject_length}: Maximum subject length
- C{from_addr}: C{From:} address format string
- C{to_addr}: C{To:} address format string
- C{to_fake}: C{To:} non-address format string
- C{bcc_addr}: C{Bcc:} address format string
- C{reply_to_addr}: C{Reply-To:} address format string
- C{diff_command}: The diff command to use
- C{generate_diffs}: List of actions for which diffs are generated
- C{browser_base_url}: type and format string of the repository
browser base url
- C{custom_header}: custom header name and format template
- C{to_newsgroup}: The newsgroup where the notification should be
posted to
- C{long_news_action}: The action to take on huge commit postings
- C{long_mail_action}: The action to take on huge commit mails
- C{mail_transfer_encoding}: Content-Transfer-Encoding for mails
- C{news_transfer_encoding}: Content-Transfer-Encoding for news
- C{mail_type}: The mail construction type
- C{extract_x509_author}: Treat author as x509 subject and try to
extract author's real name and email address
- C{cia_project_name}: The project name used for CIA notifications
- C{cia_project_module}: The project module used for CIA
notifications
- C{cia_project_branch}: The project branch used for CIA
notifications
- C{cia_project_submodule}: The project submodule used for CIA
notifications
- C{cia_project_path}: The project path, which will be stripped from
the absolute node path
- C{apply_charset_property}: Should svnmailer:content-charset
properties be recognized?
- C{show_applied_charset}: Show the encoding of the files in the
diff?
- C{viewcvs_base_url}: (I{deprecated}, use C{browser_base_url}
instead) format string for the viewcvs URL
@param space: The namespace to pollute
@type space: C{dict}
@return: The members definition
@rtype: C{dict}
"""
args = {
'space' : space,
'typemap' : struct_accessors.typemap,
'the_members': {
'_name' : 'unicode',
'_def_for_repos' : 'regex',
'_def_for_paths' : 'regex',
'for_repos' : ('regex', {'map': True}),
'for_paths' : ('regex', {'map': True}),
'exclude_paths' : ('regex', {'map': True}),
'ignore_if_other_matches' : 'humanbool',
'show_nonmatching_paths' : ('token',
{'map': True,
'allowed': xpath.valid_tokens}),
'commit_subject_template' : ('unicode', {'map': True}),
'propchange_subject_template': ('unicode', {'map': True}),
'lock_subject_template' : ('unicode', {'map': True}),
'unlock_subject_template' : ('unicode', {'map': True}),
'commit_subject_prefix' : ('unicode',
{'subst': True, 'map': True}),
'propchange_subject_prefix' : ('unicode',
{'subst': True, 'map': True}),
'lock_subject_prefix' : ('unicode',
{'subst': True, 'map': True}),
'unlock_subject_prefix' : ('unicode',
{'subst': True, 'map': True}),
'max_subject_length' : 'int',
'from_addr' : ('tokenlist',
{'subst': True, 'map': True}),
'to_addr' : ('tokenlist',
{'subst': True, 'map': True}),
'to_fake' : ('unicode',
{'subst': True, 'map': True}),
'bcc_addr' : ('tokenlist',
{'subst': True, 'map': True}),
'reply_to_addr' : ('unicode',
{'subst': True, 'map': True}),
'to_newsgroup' : ('tokenlist',
{'subst': True, 'map': True}),
'diff_command' : ('unicommand', {'map': True}),
'generate_diffs' : 'tokenlist',
'browser_base_url' : ('unicode',
{'subst': True, 'map': True}),
'long_mail_action' : ('mailaction', {'map': True}),
'long_news_action' : ('mailaction', {'map': True}),
'mail_type' : ('unicode', {'map': True}),
'mail_transfer_encoding' : 'unicode',
'news_transfer_encoding' : 'unicode',
'custom_header' : ('unicode',
{'subst': True, 'map': True}),
'extract_x509_author' : 'humanbool',
'cia_rpc_server' : ('unicode', {'map': True}),
'cia_project_name' : ('unicode',
{'subst': True, 'map': True}),
'cia_project_module' : ('unicode',
{'subst': True, 'map': True}),
'cia_project_branch' : ('unicode',
{'subst': True, 'map': True}),
'cia_project_submodule' : ('unicode',
{'subst': True, 'map': True}),
'cia_project_path' : ('unicode',
{'subst': True, 'map': True}),
'apply_charset_property' : 'humanbool',
'show_applied_charset' : ('token',
{'allowed': showenc.valid_tokens}),
# deprecated
'viewcvs_base_url' : ('unicode',
{'subst': True, 'map': True}),
},
'aliases': {
'suppress_if_match' : 'ignore_if_other_matches',
'fallback' : 'ignore_if_other_matches',
'reply_to' : 'reply_to_addr',
'x509_author' : 'extract_x509_author',
'charset_property' : 'apply_charset_property',
'truncate_subject' : 'max_subject_length',
'subject_length' : 'max_subject_length',
'diff' : 'diff_command',
'nonmatching_paths' : 'show_nonmatching_paths',
'nongroup_paths' : 'show_nonmatching_paths',
'show_nongroup_paths': 'show_nonmatching_paths',
},
}
return typedstruct.members(**args)
def generalMembers(space):
""" Define the members of the general settings
The following members are defined:
- C{diff_command}: The diff command
- C{sendmail_command}: The sendmail compatible command
- C{ssl_mode}: ssl mode
- C{smtp_host}: The smtp host (C{host[:port]})
- C{smtp_user}: The smtp auth. user
- C{smtp_pass}: The smtp auth. password
- C{debug_all_mails_to}: All mails are sent to these addresses
(for debugging purposes)
- C{cia_rpc_server}: The XML-RPC server running the CIA tracker
- C{tempdir}: The directory to use for temporary files
@param space: The namespace to pollute
@type space: C{dict}
@return: The members definition
@rtype: C{dict}
"""
args = {
'space' : space,
'typemap' : struct_accessors.typemap,
'the_members': {
'sendmail_command' : ('unicommand', {'map': True}),
'ssl_mode' : ('unicode', {'map': True}),
'smtp_host' : ('unicode', {'map': True}),
'smtp_user' : ('quotedstr', {'map': True}),
'smtp_pass' : ('quotedstr', {'map': True}),
'nntp_host' : ('unicode', {'map': True}),
'nntp_user' : ('quotedstr', {'map': True}),
'nntp_pass' : ('quotedstr', {'map': True}),
'debug_all_mails_to': ('tokenlist', {'map': True}),
'tempdir' : ('filename', {'map': True}),
# deprecated
'cia_rpc_server' : ('unicode', {'map': True}),
'diff_command' : ('unicommand', {'map': True}),
},
'aliases' : {
'mail_command' : 'sendmail_command',
'smtp_hostname': 'smtp_host',
'diff' : 'diff_command',
},
}
return typedstruct.members(**args)
def runtimeMembers(space):
""" Define the members of the runtime settings
The following members are defined:
- C{_repos}: The repository object
- C{stdin}: The stdin, read once
- C{path_encoding}: The path-encoding parameter
- C{debug}: debug mode (True/False)
- C{revision}: committed revision number
- C{repository}: path to the repository
- C{config}: supplied config file name
- C{mode}: running mode (see L{modes})
- C{author}: Author of the commit or revpropchange
- C{propname}: Property changed (in revpropchange)
- C{action}: The revprop action (M, A, D)
@param space: The namespace to pollute
@type space: C{dict}
@return: The members definition
@rtype: C{dict}
"""
args = {
'space' : space,
'typemap' : struct_accessors.typemap,
'the_members': {
'_repos' : None, # internal usage (Repository object)
'stdin' : 'stdin',
'path_encoding': 'string',
'debug' : 'bool',
'revision' : 'int',
'repository' : 'filename',
'config' : 'filename',
'mode' : 'string',
'author' : 'unicode',
'propname' : 'unicode',
'action' : 'unicode', # >= svn 1.2
},
'aliases' : None,
}
return typedstruct.members(**args)
class GroupSettingsContainer(typedstruct.Struct):
""" Container for group settings
@see: L{groupMembers} for the actual member list
"""
__slots__ = groupMembers(locals())
def _compare(self, other):
""" compare some of the attributes
@note: It uses a list of attributes that are compared if two
of these types are tested for equality. Keep in mind that
this comparision takes place, when the decision is made
whether a mail for more than one group should be sent more
than once (if the groups are not equal). All attributes, but
the ones returned by L{_getIgnorableMembers} are compared.
@see: L{_getIgnorableMembers}
@param other: The object compared to
@type other: C{GroupSettingsContainer}
@return: Are the objects equal?
@rtype: C{bool}
"""
if type(self) != type(other):
return False
attrs = [name for name in self._members_
if name not in self._getIgnorableMembers()
]
for name in attrs:
if getattr(self, name) != getattr(other, name):
return False
return True
def _getIgnorableMembers(self):
""" Returns the list of member names that be ignored in comparisons
This method called by L{_compare}. Override this method to modify
the list.
@return: The list
@rtype: C{list}
"""
return [
'_name', '_def_for_repos', '_def_for_paths',
'for_repos', 'for_paths', 'exclude_paths',
'ignore_if_other_matches', 'to_addr', 'from_addr',
'to_newsgroup', 'custom_header', 'cia_rpc_server',
'cia_project_name', 'cia_project_module', 'cia_project_branch',
'cia_project_submodule', 'cia_project_path',
]
class GeneralSettingsContainer(typedstruct.Struct):
""" Container for general settings
@see: L{generalMembers} for the actual member list
"""
__slots__ = generalMembers(locals())
class RuntimeSettingsContainer(typedstruct.Struct):
""" Container for runtime settings
@see: L{runtimeMembers} for the actual member list
"""
__slots__ = runtimeMembers(locals())
class Settings(object):
""" Settings management
@note: The C{init} method must be overridden to do the actual
initialization.
@ivar groups: group settings list
@type groups: C{list} of C{GroupSettingsContainer}
@ivar general: General settings
@type general: C{GeneralSettingsContainer}
@ivar runtime: Runtime settigs
@type runtime: C{RuntimeSettingsContainer}
@ivar debug: Debug state
@type debug: C{bool}
@ivar _charset_: The charset used for settings recoding
@type _charset_: C{str}
@ivar _maps_: The value mappers to use or C{None}
@type _maps_: C{dict}
"""
def __init__(self, *args, **kwargs):
""" Constructor
Don't override this one. Override C{init()} instead.
"""
# supply default values
self._charset_ = 'us-ascii'
self._fcharset_ = None
self._maps_ = None
self.groups = []
self.general = None
self.runtime = None
# parameter initialization
self.init(*args, **kwargs)
# sanity check
self._checkInitialization()
def _checkInitialization(self):
""" Checks if all containers are filled """
if not(self.general and self.runtime and self.groups):
raise RuntimeError("Settings are not completely initialized")
def init(self, *args, **kwargs):
""" Abstract initialization method """
raise NotImplementedError()
def _getArgs(self):
""" Returns the basic arguments for container initialization
@return: The args
@rtype: C{list}
"""
return [
self._maps_,
{'encoding': self._charset_, 'path_encoding': self._fcharset_}
]
def getGroupContainer(self, **kwargs):
""" Returns an initialized group settings container
@return: The container object
@rtype: C{GroupSettingsContainer}
"""
return GroupSettingsContainer(*self._getArgs(), **kwargs)
def getDefaultGroupContainer(self, **kwargs):
""" Returns an initialized default group settings container
@return: The container object
@rtype: C{GroupSettingsContainer}
"""
args = self._getArgs()
args[0] = None # no maps
return GroupSettingsContainer(*args, **kwargs)
def getGeneralContainer(self, **kwargs):
""" Returns an initialized general settings container
@return: The container object
@rtype: C{GeneralSettingsContainer}
"""
return GeneralSettingsContainer(*self._getArgs(), **kwargs)
def getRuntimeContainer(self, **kwargs):
""" Returns an initialized runtime settings container
Note that the runtime settings (from commandline)
are always assumed to be utf-8 encoded.
@return: The container object
@rtype: C{RuntimeSettingsContainer}
"""
args = self._getArgs()
args[0] = None
args[1]["encoding"] = "utf-8"
return RuntimeSettingsContainer(*args, **kwargs)
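# Illustrative sketch (not part of the original module, never instantiated
# here): a minimal Settings implementor along the lines described in the
# module docstring. Repository path, revision and group name are made-up
# example values.
class _ExampleStaticSettings(Settings):
    """ Example-only settings implementor """
    def init(self, repository, revision):
        """ Fills the three containers with mostly default values """
        self.general = self.getGeneralContainer()
        self.runtime = self.getRuntimeContainer()
        self.runtime.repository = repository
        self.runtime.revision = revision
        self.runtime.mode = modes.commit
        group = self.getDefaultGroupContainer()
        group._name = u'example-defaults'
        self.groups = [group]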
| apache-2.0 | 704,259,331,109,030,700 | 39.206122 | 81 | 0.526115 | false | 4.425202 | false | false | false |
polyaxon/polyaxon-api | polyaxon_lib/estimators/hooks/general_hooks.py | 1 | 2631 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from polyaxon_lib.estimators.hooks.utils import can_run_hook
class GlobalStepWaiterHook(basic_session_run_hooks.GlobalStepWaiterHook):
"""Delay execution until global step reaches to wait_until_step.
(A mirror to tensorflow.python.training.basic_session_run_hooks GlobalStepWaiterHook.)
This hook delays execution until global step reaches to `wait_until_step`. It
is used to gradually start workers in distributed settings. One example usage
would be setting `wait_until_step=int(K*log(task_id+1))` assuming that
task_id=0 is the chief.
Args:
wait_until_step: an `int` shows until which global step should we wait.
"""
pass
class FinalOpsHook(basic_session_run_hooks.FinalOpsHook):
"""A run hook which evaluates `Tensors` at the end of a session.
    (A mirror to tensorflow.python.training.basic_session_run_hooks FinalOpsHook.)
Args:
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names to `Tensors`.
final_ops_feed_dict: A feed dictionary to use when running `final_ops_dict`.
"""
pass
class StopAfterNEvalsHook(evaluation._StopAfterNEvalsHook): # pylint: disable=protected-access
"""Run hook used by the evaluation routines to run the `eval_ops` N times."""
pass
class NanTensorHook(basic_session_run_hooks.NanTensorHook):
"""NaN Loss monitor.
A modified version of tensorflow.python.training.basic_session_run_hooks NanTensorHook.
    Checks the context for `no_run_hooks_op` before calling the hook.
Monitors loss and stops training if loss is NaN.
Can either fail with exception or just stop training.
Args:
loss_tensor: `Tensor`, the loss tensor.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
"""
def before_run(self, run_context): # pylint: disable=unused-argument
if can_run_hook(run_context):
return super(NanTensorHook, self).before_run(run_context)
return None
def after_run(self, run_context, run_values):
if can_run_hook(run_context):
return super(NanTensorHook, self).after_run(run_context, run_values)
GENERAL_HOOKS = OrderedDict([
('GlobalStepWaiterHook', GlobalStepWaiterHook),
('FinalOpsHook', FinalOpsHook),
('StopAfterNEvalsHook', StopAfterNEvalsHook),
('NanTensorHook', NanTensorHook)
])
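# Illustrative use of the registry above (a sketch, not part of the original
# module): a hook class can be looked up by name and instantiated with its
# usual TensorFlow arguments, e.g.
#   hook = GENERAL_HOOKS['NanTensorHook'](loss_tensor=loss)
# where `loss` is assumed to be the estimator's training loss tensor.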
| mit | -773,701,700,634,766,200 | 34.08 | 96 | 0.717598 | false | 3.774749 | false | false | false |
ShovanSarker/mango_office | actions/views.py | 1 | 15639 | from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from users.models import AllUsers, ACL
from status.models import Status
from task.models import Task
import datetime
from attendance.models import AttendanceInOffice, AttendanceInHome
from django.contrib.auth.models import User
# Create your views here.
@csrf_exempt
def login_page(request):
return render(request, 'login.html')
@csrf_exempt
def login_auth(request):
post_data = request.POST
print(post_data)
    if 'username' in post_data and 'password' in post_data:
print(post_data['username'])
print(post_data['password'])
user = authenticate(username=post_data['username'], password=post_data['password'])
if user is not None:
if user.is_active:
login(request, user)
request.session['user'] = post_data['username']
if user.is_superuser:
res = redirect('/admin')
else:
res = redirect('/')
else:
res = render(request, 'login.html',
{'wrong': True,
'text': 'The password is valid, but the account has been disabled!'})
else:
res = render(request, 'login.html',
{'wrong': True,
'text': 'The username and password you have entered is not correct. Please retry'})
else:
res = render(request, 'login.html', {'wrong': False})
res['Access-Control-Allow-Origin'] = "*"
res['Access-Control-Allow-Headers'] = "Origin, X-Requested-With, Content-Type, Accept"
res['Access-Control-Allow-Methods'] = "PUT, GET, POST, DELETE, OPTIONS"
return res
def logout_now(request):
logout(request)
return redirect('/login')
@login_required(login_url='/login/')
def home(request):
page_title = 'Home'
user = request.session['user']
if not AllUsers.objects.exists():
print(request.session['user'])
new_status = Status.objects.get(StatusKey='office')
new_user = AllUsers(username=user, Name=user, Email=user + '@inflack.com', Status=new_status)
new_user.save()
new_user_acl = ACL(user=new_user,
CanSeeOthersTaskList=True,
CanSeeOthersAttendance=True,
CanAddMoreEmployee=True,
CanSeeOthersDetails=True,
CanSeeOthersStatus=True)
new_user_acl.save()
if AllUsers.objects.filter(username__exact=user).exists():
this_user = AllUsers.objects.get(username__exact=user)
if this_user.Active:
all_status = Status.objects.all()
display = render(request, 'client_dashboard.html', {'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee,
'all_status': all_status,
'page_title': page_title})
else:
logout(request)
display = render(request, 'login.html',
{'wrong': True,
'text': 'You are not authorized to login. Please contact administrator for more details'})
else:
logout(request)
display = render(request, 'login.html',
{'wrong': True,
'text': 'Something went wrong. Please LOGIN again.'})
return display
@login_required(login_url='/login/')
def add_employee(request):
user = request.session['user']
post_data = request.POST
this_user = AllUsers.objects.get(username__exact=user)
# login_user = this_user.Name
# print(post_data['super-admin'])
if 'username' in post_data and 'csrfmiddlewaretoken' in post_data:
if AllUsers.objects.filter(username__exact=user).exists():
if this_user.Active and this_user.acl.CanAddMoreEmployee:
if AllUsers.objects.filter(username__exact=post_data['username']).exists() or \
post_data['username'] == 'admin':
# This username is already taken
print(post_data)
display = render(request, 'add_admin.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee,
'wrong': True,
'text': 'This USERNAME is already taken.'
'Please try with a different one'})
else:
if post_data['password'] == post_data['re-password']:
# password matches
print(post_data)
new_status = Status.objects.get(StatusKey='office')
new_user = AllUsers(username=post_data['username'],
Name=post_data['name'],
Designation=post_data['designation'],
Phone=post_data['phone'],
Email=post_data['email'],
Status=new_status)
new_user.save()
new_user_acl = ACL(user=new_user)
new_user_acl.save()
new_user_login = User.objects.create_user(post_data['username'],
post_data['email'],
post_data['password'])
new_user_login.save()
display = render(request, 'add_admin.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee,
'success': True,
'text': 'New employee has been '
'added successfully.'})
else:
display = render(request, 'add_admin.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee,
'wrong': True,
'text': 'The passwords do not match.'
'Please try again'})
else:
logout(request)
display = render(request, 'login.html',
{'wrong': True,
'text': 'You are not authorized to login.'
' Please contact administrator for more details'})
else:
display = redirect('/')
else:
if this_user.acl.CanAddMoreEmployee:
display = render(request, 'add_admin.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = render(request, 'access_denied.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
return display
@login_required(login_url='/login/')
def change_status(request):
user = request.session['user']
get_data = request.GET
if AllUsers.objects.filter(username__exact=user).exists():
new_status = Status.objects.get(StatusKey=get_data['to'])
this_user = AllUsers.objects.get(username__exact=user)
current_status = this_user.Status
print(current_status.StatusKey)
print(get_data['to'])
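        # Transitions allowed by the condition below: office/away/meeting/out
        # can be entered unless the current status is 'home'; 'home' can only
        # be entered from 'out', and 'out' from 'home'. Entering the office
        # from 'out' opens an AttendanceInOffice record, entering 'home' from
        # 'out' opens an AttendanceInHome record, and moving to 'out' closes
        # the corresponding open record.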
if ((get_data['to'] == 'office' or get_data['to'] == 'away' or
get_data['to'] == 'meeting' or get_data['to'] == 'out') and current_status.StatusKey != 'home') or \
get_data['to'] == 'home' and current_status.StatusKey == 'out' or \
get_data['to'] == 'out' and current_status.StatusKey == 'home':
if (get_data['to'] == 'office' or get_data['to'] == 'away' or get_data['to'] == 'meeting') \
and current_status.StatusKey == 'out':
new_office_attendance = AttendanceInOffice(User=this_user)
new_office_attendance.save()
            elif get_data['to'] == 'home' and current_status.StatusKey == 'out':
new_home_attendance = AttendanceInHome(User=this_user)
new_home_attendance.save()
            elif get_data['to'] == 'out' and current_status.StatusKey == 'home':
new_home_attendance = AttendanceInHome.objects.get(User=this_user, ExitTime=None)
print(datetime.datetime.now())
new_home_attendance.ExitTime = datetime.datetime.now()
new_home_attendance.save()
            elif get_data['to'] == 'out' and (current_status.StatusKey == 'office' or
current_status.StatusKey == 'away' or
current_status.StatusKey == 'meeting'):
new_office_attendance = AttendanceInOffice.objects.get(User=this_user, ExitTime=None)
print(datetime.datetime.now())
new_office_attendance.ExitTime = datetime.datetime.now()
new_office_attendance.save()
this_user.Status = new_status
this_user.save()
display = redirect('/')
else:
display = redirect('/logout')
return display
@login_required(login_url='/login/')
def employee_list(request):
user = request.session['user']
if AllUsers.objects.filter(username__exact=user).exists():
this_user = AllUsers.objects.get(username__exact=user)
if this_user.acl.CanSeeOthersStatus:
all_employees = AllUsers.objects.all()
display = render(request, 'admin_list.html', {'page_title': 'Add Employee',
'login_user': this_user,
'all_employees': all_employees,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = render(request, 'access_denied.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = redirect('/logout')
return display
@login_required(login_url='/login/')
def all_task(request):
user = request.session['user']
if AllUsers.objects.filter(username__exact=user).exists():
this_user = AllUsers.objects.get(username__exact=user)
# if this_user.acl.CanSeeOthersStatus:
all_tasks = Task.objects.filter(AssignedTo=this_user)
assigned_tasks = Task.objects.filter(AssignedBy=this_user)
display = render(request, 'all_task.html', {'page_title': 'Task List',
'login_user': this_user,
'all_tasks': all_tasks,
'assigned_tasks': assigned_tasks,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = redirect('/logout')
return display
@login_required(login_url='/login/')
def attendance(request):
user = request.session['user']
if AllUsers.objects.filter(username__exact=user).exists():
this_user = AllUsers.objects.get(username__exact=user)
# if this_user.acl.CanSeeOthersStatus:
office_work = AttendanceInOffice.objects.filter(User=this_user)
home_work = AttendanceInHome.objects.filter(User=this_user)
display = render(request, 'attendance.html', {'page_title': 'Attendance',
'login_user': this_user,
'office_work': office_work,
'home_work': home_work,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = redirect('/logout')
return display
@login_required(login_url='/login/')
def profile(request):
user = request.session['user']
if AllUsers.objects.filter(username__exact=user).exists():
this_user = AllUsers.objects.get(username__exact=user)
display = render(request, 'profile.html', {'page_title': 'Profile',
'login_user': this_user,
'this_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = redirect('/logout')
return display
| gpl-2.0 | -5,272,535,817,592,566,000 | 53.114187 | 127 | 0.475478 | false | 4.843295 | false | false | false |
qingtech/weibome | weibome/settings.py | 1 | 5454 | # Django settings for weibome project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
import os
if 'SERVER_SOFTWARE' in os.environ:
from sae.const import (
MYSQL_HOST, MYSQL_PORT, MYSQL_USER, MYSQL_PASS, MYSQL_DB
)
else:
# Make `python manage.py syncdb` works happy!
MYSQL_HOST = 'localhost'
MYSQL_PORT = '3306'
MYSQL_USER = 'root'
MYSQL_PASS = '123'
MYSQL_DB = 'weibome'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': MYSQL_DB,
'USER': MYSQL_USER,
'PASSWORD': MYSQL_PASS,
'HOST': MYSQL_HOST,
'PORT': MYSQL_PORT,
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = 'static/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '#7-*s-)n!pnjrlv@f%f4&pn+#lr8)3o!5j-d-(is2accw!9x5p'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'weibome.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'weibome.wsgi.application'
import os.path
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__), 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'weime',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| gpl-2.0 | 3,294,211,815,736,853,500 | 30.526012 | 88 | 0.688302 | false | 3.530097 | false | false | false |
vbraun/SageUI | src/sageui/view/trac_window.py | 1 | 13199 | """
Window showing a Trac Ticket
"""
##############################################################################
# SageUI: A graphical user interface to Sage, Trac, and Git.
# Copyright (C) 2013 Volker Braun <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import logging
import gtk
import gobject
import pango
from gtksourceview2 import View as GtkSourceView
from buildable import Buildable
from window import Window
from terminal_widget import TerminalWidget
class TracWindowUpdater(object):
def __init__(self, trac_window, timeout=1):
self.trac_window = trac_window
self.counter = 0
gobject.timeout_add_seconds(timeout, self.callback)
def callback(self):
self.counter += 1
#print 'updating trac window', str(self.counter)
if not self.trac_window.window.get_visible():
return False
self.trac_window.update_ticket_age()
return True
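    # Note: gobject calls callback() every `timeout` seconds for as long as it
    # returns True; returning False once the window is hidden removes the
    # timeout source and stops refreshing the ticket ages.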
class TracWindow(Buildable, Window):
def __init__(self, presenter, glade_file):
self.presenter = presenter
Buildable.__init__(self, ['trac_window', 'trac_menubar', 'trac_toolbar',
'trac_tool_web', 'trac_tool_git', 'trac_tool_refresh',
'trac_tool_git_icon',
'trac_ticketlist_store', 'trac_ticketlist_view',
'trac_search_entry',
'trac_comments',
'trac_comment_text', 'trac_comment_buffer'])
builder = self.get_builder(glade_file)
Window.__init__(self, builder, 'trac_window')
self.menu = builder.get_object('trac_menubar')
self.toolbar = builder.get_object('trac_toolbar')
self.search_entry = builder.get_object('trac_search_entry')
self.ticketlist_store = builder.get_object('trac_ticketlist_store')
self.ticketlist_view = builder.get_object('trac_ticketlist_view')
self._init_ticketlist(self.ticketlist_view)
self.comments = builder.get_object('trac_comments')
self._init_comments(self.comments)
self.comment_text = builder.get_object('trac_comment_text')
self.comment_buffer = builder.get_object('trac_comment_buffer')
self.toolbar_web = builder.get_object('trac_tool_web')
self.toolbar_refresh = builder.get_object('trac_tool_refresh')
self.toolbar_git = builder.get_object('trac_tool_git')
builder.connect_signals(self)
self.ticket_list = None
self.current_ticket = None
def _init_ticketlist(self, listview):
listview.get_selection().set_mode(gtk.SELECTION_BROWSE)
# add two columns
self.col_title = gtk.TreeViewColumn('Description')
self.col_time = gtk.TreeViewColumn('Last seen')
listview.append_column(self.col_title)
listview.append_column(self.col_time)
# create a CellRenderers to render the data
self.cell_title = gtk.CellRendererText()
self.cell_title.set_property('ellipsize', pango.ELLIPSIZE_END)
self.cell_time = gtk.CellRendererText()
# add the cells to the columns - 2 in the first
self.col_title.pack_start(self.cell_title, True)
self.col_title.set_attributes(self.cell_title, markup=1)
self.col_title.set_resizable(True)
self.col_title.set_expand(True)
self.col_time.pack_end(self.cell_time, True)
self.col_time.set_attributes(self.cell_time, markup=2)
#self.col_time.set_expand(True)
def _init_comments(self, comments):
color = gtk.gdk.color_parse('#F0EAD6')
comments.modify_base(gtk.STATE_NORMAL, color)
tag_table = comments.get_buffer().get_tag_table()
tag = gtk.TextTag('warning')
tag.set_property('foreground', 'red')
tag_table.add(tag)
tag = gtk.TextTag('label')
tag.set_property('foreground', 'blue')
tag.set_property('style', pango.STYLE_ITALIC)
tag_table.add(tag)
tag = gtk.TextTag('description')
tag.set_property('foreground', 'black')
tag.set_property('family', 'monospace')
tag.set_property('wrap-mode', gtk.WRAP_WORD)
tag_table.add(tag)
tag = gtk.TextTag('trac_field')
tag.set_property('foreground', 'black')
tag.set_property('family', 'monospace')
tag.set_property('weight', pango.WEIGHT_SEMIBOLD)
tag_table.add(tag)
tag = gtk.TextTag('comment')
tag.set_property('foreground', 'black')
tag.set_property('family', 'monospace')
tag.set_property('wrap-mode', gtk.WRAP_WORD)
tag_table.add(tag)
tag = gtk.TextTag('title')
tag.set_property('foreground', 'black')
tag.set_property('weight', pango.WEIGHT_BOLD)
tag.set_property('scale', pango.SCALE_X_LARGE)
tag_table.add(tag)
tag = gtk.TextTag('debug')
tag.set_property('wrap-mode', gtk.WRAP_WORD)
tag_table.add(tag)
def show(self):
super(TracWindow, self).show()
TracWindowUpdater(self)
def set_ticket_list(self, ticket_list, current_ticket=None):
assert (current_ticket is None) or (current_ticket in ticket_list)
self.ticket_list = ticket_list
self.ticketlist_store.clear()
for ticket in ticket_list:
n = ticket.get_number()
row = [n,
'<b>#'+str(n)+'</b> '+ticket.get_title(),
str(ticket.get_pretty_last_viewed_time())]
self.ticketlist_store.append(row)
self.set_current_ticket(current_ticket)
def get_ticket_numbers(self):
result = []
store = self.ticketlist_store
iter = store.get_iter_first()
        while iter is not None:
            result.append(store.get_value(iter, 0))
            iter = store.iter_next(iter)
        return tuple(result)
def set_current_ticket(self, ticket):
"""
Select ``ticket`` in the ticket list.
Also, updates the "Last seen" field since it probably changed to right now.
"""
self.current_ticket = ticket
sel = self.ticketlist_view.get_selection()
if ticket is None:
sel.unselect_all()
self.toolbar_refresh.set_sensitive(False)
self.toolbar_web.set_sensitive(False)
self.toolbar_git.set_sensitive(False)
return
assert ticket in self.ticket_list
ticket_number = ticket.get_number()
store = self.ticketlist_store
iter = store.get_iter_first()
while (iter is not None) and (store.get_value(iter, 0) != ticket_number):
iter = store.iter_next(iter)
assert iter != None
sel.select_iter(iter)
self.toolbar_refresh.set_sensitive(True)
self.toolbar_web.set_sensitive(True)
self.toolbar_git.set_sensitive(ticket.get_branch() is not None)
self.update_ticket_age([ticket])
def update_ticket_age(self, tickets=None):
if tickets is None:
tickets = self.ticket_list
if tickets is None:
return
ticket_by_number = dict()
for ticket in self.ticket_list:
ticket_by_number[ticket.get_number()] = ticket
store = self.ticketlist_store
iter = store.get_iter_first()
while iter is not None:
n = store.get_value(iter, 0)
ticket = ticket_by_number[n]
store.set(iter, 2, str(ticket.get_pretty_last_viewed_time()))
iter = store.iter_next(iter)
def on_trac_ticketlist_view_cursor_changed(self, widget, data=None):
model, iter = self.ticketlist_view.get_selection().get_selected()
if not iter:
return
ticket_number = model.get_value(iter, 0)
logging.info('trac ticket cursor changed to #%s', ticket_number)
self.presenter.ticket_selected(ticket_number)
def display_ticket(self, ticket):
buf = self.comments.get_buffer()
buf.set_text('')
if ticket is None:
return
def append(*args):
buf.insert_with_tags(buf.get_end_iter(), *args)
tag_table = buf.get_tag_table()
warn_tag = tag_table.lookup('warning')
title_tag = tag_table.lookup('title')
label_tag = tag_table.lookup('label')
trac_field_tag = tag_table.lookup('trac_field')
description_tag = tag_table.lookup('description')
comment_tag = tag_table.lookup('comment')
debug_tag = tag_table.lookup('debug')
append('Trac #'+str(ticket.get_number())+': '+ticket.get_title(), title_tag)
append('\n\n')
branch = ticket.get_branch()
if branch is not None:
append('Branch: ', label_tag)
append(branch, trac_field_tag)
append('\n')
deps = ticket.get_dependencies()
if deps is not None:
append('Dependencies: ', label_tag)
append(deps, trac_field_tag)
append('\n')
append('Description:\n', label_tag)
append(ticket.get_description().strip(), description_tag)
for comment in ticket.comment_iter():
append('\n\n')
author = comment.get_author()
time = comment.get_ctime().ctime()
append('Comment (by {0} on {1}):\n'.format(author, time), label_tag)
append(comment.get_comment().strip(), comment_tag)
append('\n\n')
append('Created: ', label_tag)
append(ticket.get_ctime().ctime(), trac_field_tag)
append('\t Last modified: ', label_tag)
append(ticket.get_mtime().ctime(), trac_field_tag)
append('\n\n')
append(str(ticket._data), debug_tag)
append('\n')
for log in ticket._change_log:
append(str(log) + '\n', debug_tag)
def on_trac_window_delete_event(self, widget, data=None):
self.presenter.hide_trac_window()
return True
def on_trac_menu_close_activate(self, widget, data=None):
self.presenter.hide_trac_window()
def on_trac_window_map(self, widget, data=None):
print 'trac window map'
def on_trac_menu_new_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac new ticket")
def on_trac_menu_open_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac open ticket")
def on_trac_menu_about_activate(self, widget, data=None):
self.presenter.show_about_dialog()
def on_trac_menu_cut_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac cut")
def on_trac_menu_copy_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac copy")
def on_trac_menu_paste_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac paste")
def on_trac_menu_delete_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac delete")
def on_trac_menu_preferences_activate(self, widget, data=None):
self.presenter.show_preferences_dialog()
def on_trac_tool_new_clicked(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac new ticket")
def on_trac_tool_web_clicked(self, widget, data=None):
url = 'http://trac.sagemath.org/{0}'.format(self.current_ticket.get_number())
self.presenter.xdg_open(url)
def on_trac_tool_git_clicked(self, widget, data=None):
branch = self.current_ticket.get_branch()
assert branch is not None # button should have been disabled
number = self.current_ticket.get_number()
logging.info('git button for %s %s', branch, number)
self.presenter.checkout_branch(branch, number)
self.presenter.show_git_window()
def on_trac_tool_refresh_clicked(self, widget, data=None):
self.presenter.load_ticket(self.current_ticket)
def on_trac_search_entry_activate(self, widget, data=None):
entry = self.search_entry.get_buffer().get_text()
entry = entry.strip('# ')
logging.info('searching trac for %s', entry)
try:
ticket_number = int(entry)
self.presenter.load_ticket(ticket_number)
except ValueError:
self.presenter.show_error(self, 'Invalid ticket number', 'Expected integer, got: '+entry)
| gpl-3.0 | 6,258,722,187,542,107,000 | 39.48773 | 101 | 0.608152 | false | 3.746523 | false | false | false |
berkeley-stat159/project-lambda | code/stat159lambda/utils/tests/test_parse_demographics.py | 1 | 1388 | from __future__ import absolute_import
from stat159lambda.utils import parse_demographics
import os
import csv
def prepare_for_tests():
with open('demographics.csv', 'w') as csvfile:
file_writer = csv.writer(csvfile, delimiter=',', quotechar='"')
file_writer.writerow(['id', 'gender', 'age', 'forrest_seen_count'])
file_writer.writerow(['1', 'm', '30-35', '5'])
file_writer.writerow(['2', 'm', '30-35', '1'])
test_object = parse_demographics.parse_csv('demographics.csv')
return test_object
def test_seen_most_times():
test_subjects = prepare_for_tests()
seen_count = parse_demographics.seen_most_times(test_subjects)
assert seen_count[0] == 5
assert seen_count[1] == 1
delete_file()
def test_seen_least_times():
test_subjects = prepare_for_tests()
seen_count = parse_demographics.seen_least_times(test_subjects)
assert seen_count[0] == 1
assert seen_count[1] == 2
delete_file()
def test_find_id_by_gender():
test_subjects = prepare_for_tests()
id_list = parse_demographics.find_id_by_gender(test_subjects, 'm')
assert len(id_list) == 2
assert id_list[0] == 'm'
assert id_list[1] == 'm'
delete_file()
def test_find_count_by_id():
test_subjects = prepare_for_tests()
count = parse_demographics.find_count_by_id(test_subjects, 1)
assert count == 5
delete_file()
def delete_file():
os.remove('demographics.csv')
| bsd-3-clause | -6,484,319,558,559,641,000 | 26.215686 | 75 | 0.676513 | false | 2.922105 | true | false | false |
trabucayre/gnuradio | gr-audio/examples/python/dial_tone_daemon.py | 1 | 1411 | #!/usr/bin/env python
#
# Copyright 2004,2005,2007,2008,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gru
from gnuradio import audio
from gnuradio.eng_arg import eng_float
from argparse import ArgumentParser
import os
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
parser = ArgumentParser()
parser.add_argument("-O", "--audio-output", default="",
help="pcm output device name. E.g., hw:0,0 or /dev/dsp")
parser.add_argument("-r", "--sample-rate", type=eng_float, default=48000,
help="set sample rate to RATE (%(default)r)")
args = parser.parse_args()
sample_rate = int(args.sample_rate)
ampl = 0.1
src0 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 350, ampl)
src1 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 440, ampl)
dst = audio.sink(sample_rate, args.audio_output)
self.connect(src0, (dst, 0))
self.connect(src1, (dst, 1))
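# Example invocation (a sketch; the audio device name is system dependent):
#   ./dial_tone_daemon.py -O hw:0,0 -r 48000
# The daemonized process keeps playing the 350 Hz + 440 Hz dial tone until it
# is killed using the PID printed at startup.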
if __name__ == '__main__':
pid = gru.daemonize()
print("To stop this program, enter 'kill %d'" % pid)
my_top_block().run()
| gpl-3.0 | -8,665,632,935,623,739,000 | 29.673913 | 83 | 0.61871 | false | 3.273782 | false | false | false |
mtl/svg2mod | svg2mod/svg2mod.py | 1 | 39409 | #!/usr/bin/python
from __future__ import absolute_import
import argparse
import datetime
import os
from pprint import pformat, pprint
import re
import svg2mod.svg as svg
import sys
#----------------------------------------------------------------------------
DEFAULT_DPI = 96 # 96 as of Inkscape 0.92
def main():
args, parser = get_arguments()
pretty = args.format == 'pretty'
use_mm = args.units == 'mm'
if pretty:
if not use_mm:
print( "Error: decimil units only allowed with legacy output type" )
sys.exit( -1 )
#if args.include_reverse:
#print(
#"Warning: reverse footprint not supported or required for" +
#" pretty output format"
#)
# Import the SVG:
imported = Svg2ModImport(
args.input_file_name,
args.module_name,
args.module_value
)
# Pick an output file name if none was provided:
if args.output_file_name is None:
args.output_file_name = os.path.splitext(
os.path.basename( args.input_file_name )
)[ 0 ]
# Append the correct file name extension if needed:
if pretty:
extension = ".kicad_mod"
else:
extension = ".mod"
if args.output_file_name[ - len( extension ) : ] != extension:
args.output_file_name += extension
# Create an exporter:
if pretty:
exported = Svg2ModExportPretty(
imported,
args.output_file_name,
args.scale_factor,
args.precision,
args.dpi,
)
else:
# If the module file exists, try to read it:
exported = None
if os.path.isfile( args.output_file_name ):
try:
exported = Svg2ModExportLegacyUpdater(
imported,
args.output_file_name,
args.scale_factor,
args.precision,
args.dpi,
include_reverse = not args.front_only,
)
except Exception as e:
raise e
#print( e.message )
#exported = None
# Write the module file:
if exported is None:
exported = Svg2ModExportLegacy(
imported,
args.output_file_name,
args.scale_factor,
args.precision,
use_mm = use_mm,
dpi = args.dpi,
include_reverse = not args.front_only,
)
# Export the footprint:
exported.write()
#----------------------------------------------------------------------------
class LineSegment( object ):
#------------------------------------------------------------------------
@staticmethod
def _on_segment( p, q, r ):
""" Given three colinear points p, q, and r, check if
point q lies on line segment pr. """
if (
q.x <= max( p.x, r.x ) and
q.x >= min( p.x, r.x ) and
q.y <= max( p.y, r.y ) and
q.y >= min( p.y, r.y )
):
return True
return False
#------------------------------------------------------------------------
@staticmethod
def _orientation( p, q, r ):
""" Find orientation of ordered triplet (p, q, r).
Returns following values
0 --> p, q and r are colinear
1 --> Clockwise
2 --> Counterclockwise
"""
val = (
( q.y - p.y ) * ( r.x - q.x ) -
( q.x - p.x ) * ( r.y - q.y )
)
if val == 0: return 0
if val > 0: return 1
return 2
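    # Worked example: for p=(0,0), q=(1,0), r=(1,1) the expression gives
    # (0-0)*(1-1) - (1-0)*(1-0) = -1, so the triplet is counterclockwise (2);
    # swapping q and r gives +1, i.e. clockwise (1); collinear points give 0.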
#------------------------------------------------------------------------
def __init__( self, p = None, q = None ):
self.p = p
self.q = q
#------------------------------------------------------------------------
def connects( self, segment ):
if self.q.x == segment.p.x and self.q.y == segment.p.y: return True
if self.q.x == segment.q.x and self.q.y == segment.q.y: return True
if self.p.x == segment.p.x and self.p.y == segment.p.y: return True
if self.p.x == segment.q.x and self.p.y == segment.q.y: return True
return False
#------------------------------------------------------------------------
def intersects( self, segment ):
""" Return true if line segments 'p1q1' and 'p2q2' intersect.
Adapted from:
http://www.geeksforgeeks.org/check-if-two-given-line-segments-intersect/
"""
# Find the four orientations needed for general and special cases:
o1 = self._orientation( self.p, self.q, segment.p )
o2 = self._orientation( self.p, self.q, segment.q )
o3 = self._orientation( segment.p, segment.q, self.p )
o4 = self._orientation( segment.p, segment.q, self.q )
return (
# General case:
( o1 != o2 and o3 != o4 )
or
# p1, q1 and p2 are colinear and p2 lies on segment p1q1:
( o1 == 0 and self._on_segment( self.p, segment.p, self.q ) )
or
# p1, q1 and p2 are colinear and q2 lies on segment p1q1:
( o2 == 0 and self._on_segment( self.p, segment.q, self.q ) )
or
# p2, q2 and p1 are colinear and p1 lies on segment p2q2:
( o3 == 0 and self._on_segment( segment.p, self.p, segment.q ) )
or
# p2, q2 and q1 are colinear and q1 lies on segment p2q2:
( o4 == 0 and self._on_segment( segment.p, self.q, segment.q ) )
)
#------------------------------------------------------------------------
def q_next( self, q ):
self.p = self.q
self.q = q
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class PolygonSegment( object ):
#------------------------------------------------------------------------
def __init__( self, points ):
self.points = points
if len( points ) < 3:
print(
"Warning:"
" Path segment has only {} points (not a polygon?)".format(
len( points )
)
)
#------------------------------------------------------------------------
# KiCad will not "pick up the pen" when moving between a polygon outline
# and holes within it, so we search for a pair of points connecting the
# outline (self) to the hole such that the connecting segment will not
# cross the visible inner space within any hole.
def _find_insertion_point( self, hole, holes ):
#print( " Finding insertion point. {} holes".format( len( holes ) ) )
# Try the next point on the container:
for cp in range( len( self.points ) ):
container_point = self.points[ cp ]
#print( " Trying container point {}".format( cp ) )
# Try the next point on the hole:
for hp in range( len( hole.points ) - 1 ):
hole_point = hole.points[ hp ]
#print( " Trying hole point {}".format( cp ) )
bridge = LineSegment( container_point, hole_point )
# Check for intersection with each other hole:
for other_hole in holes:
#print( " Trying other hole. Check = {}".format( hole == other_hole ) )
# If the other hole intersects, don't bother checking
# remaining holes:
if other_hole.intersects(
bridge,
check_connects = (
other_hole == hole or other_hole == self
)
): break
#print( " Hole does not intersect." )
else:
print( " Found insertion point: {}, {}".format( cp, hp ) )
# No other holes intersected, so this insertion point
# is acceptable:
return ( cp, hole.points_starting_on_index( hp ) )
print(
"Could not insert segment without overlapping other segments"
)
#------------------------------------------------------------------------
# Return the list of ordered points starting on the given index, ensuring
# that the first and last points are the same.
def points_starting_on_index( self, index ):
points = self.points
if index > 0:
# Strip off end point, which is a duplicate of the start point:
points = points[ : -1 ]
points = points[ index : ] + points[ : index ]
points.append(
svg.Point( points[ 0 ].x, points[ 0 ].y )
)
return points
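    # For example, the closed segment [A, B, C, A] re-rooted at index 1
    # becomes [B, C, A, B]: the duplicate end point is dropped, the list is
    # rotated, and a copy of the new start point is appended to close it.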
#------------------------------------------------------------------------
# Return a list of points with the given polygon segments (paths) inlined.
def inline( self, segments ):
if len( segments ) < 1:
return self.points
print( " Inlining {} segments...".format( len( segments ) ) )
all_segments = segments[ : ] + [ self ]
insertions = []
# Find the insertion point for each hole:
for hole in segments:
insertion = self._find_insertion_point(
hole, all_segments
)
if insertion is not None:
insertions.append( insertion )
insertions.sort( key = lambda i: i[ 0 ] )
inlined = [ self.points[ 0 ] ]
ip = 1
points = self.points
for insertion in insertions:
while ip <= insertion[ 0 ]:
inlined.append( points[ ip ] )
ip += 1
if (
inlined[ -1 ].x == insertion[ 1 ][ 0 ].x and
inlined[ -1 ].y == insertion[ 1 ][ 0 ].y
):
inlined += insertion[ 1 ][ 1 : -1 ]
else:
inlined += insertion[ 1 ]
inlined.append( svg.Point(
points[ ip - 1 ].x,
points[ ip - 1 ].y,
) )
while ip < len( points ):
inlined.append( points[ ip ] )
ip += 1
return inlined
#------------------------------------------------------------------------
def intersects( self, line_segment, check_connects ):
hole_segment = LineSegment()
# Check each segment of other hole for intersection:
for point in self.points:
hole_segment.q_next( point )
if hole_segment.p is not None:
if (
check_connects and
line_segment.connects( hole_segment )
): continue
if line_segment.intersects( hole_segment ):
#print( "Intersection detected." )
return True
return False
#------------------------------------------------------------------------
# Apply all transformations and rounding, then remove duplicate
# consecutive points along the path.
def process( self, transformer, flip ):
points = []
for point in self.points:
point = transformer.transform_point( point, flip )
if (
len( points ) < 1 or
point.x != points[ -1 ].x or
point.y != points[ -1 ].y
):
points.append( point )
if (
points[ 0 ].x != points[ -1 ].x or
points[ 0 ].y != points[ -1 ].y
):
#print( "Warning: Closing polygon. start=({}, {}) end=({}, {})".format(
#points[ 0 ].x, points[ 0 ].y,
#points[ -1 ].x, points[ -1 ].y,
#) )
points.append( svg.Point(
points[ 0 ].x,
points[ 0 ].y,
) )
#else:
#print( "Polygon closed: start=({}, {}) end=({}, {})".format(
#points[ 0 ].x, points[ 0 ].y,
#points[ -1 ].x, points[ -1 ].y,
#) )
self.points = points
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class Svg2ModImport( object ):
#------------------------------------------------------------------------
def __init__( self, file_name, module_name, module_value ):
self.file_name = file_name
self.module_name = module_name
self.module_value = module_value
print( "Parsing SVG..." )
self.svg = svg.parse( file_name )
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class Svg2ModExport( object ):
#------------------------------------------------------------------------
@staticmethod
def _convert_decimil_to_mm( decimil ):
return float( decimil ) * 0.00254
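    # A decimil is 1/10000 inch, so for example _convert_decimil_to_mm( 1000 )
    # returns 2.54 (i.e. 0.1 inch).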
#------------------------------------------------------------------------
@staticmethod
def _convert_mm_to_decimil( mm ):
return int( round( mm * 393.700787 ) )
#------------------------------------------------------------------------
def _get_fill_stroke( self, item ):
fill = True
stroke = True
        # None means "no stroke-width specified"; a default is substituted below.
        stroke_width = None
if item.style is not None and item.style != "":
for property in item.style.split( ";" ):
nv = property.split( ":" );
name = nv[ 0 ].strip()
value = nv[ 1 ].strip()
if name == "fill" and value == "none":
fill = False
elif name == "stroke" and value == "none":
stroke = False
elif name == "stroke-width":
value = value.replace( "px", "" )
stroke_width = float( value ) * 25.4 / float(self.dpi)
if not stroke:
stroke_width = 0.0
elif stroke_width is None:
# Give a default stroke width?
stroke_width = self._convert_decimil_to_mm( 1 )
return fill, stroke, stroke_width
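    # For example, an item styled "fill:none;stroke:#000000;stroke-width:2.5"
    # yields fill=False, stroke=True and a stroke width of 2.5 * 25.4 / dpi mm
    # (any "px" suffix on the width is stripped before conversion).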
#------------------------------------------------------------------------
def __init__(
self,
svg2mod_import,
file_name,
scale_factor = 1.0,
precision = 20.0,
use_mm = True,
dpi = DEFAULT_DPI,
):
if use_mm:
# 25.4 mm/in;
scale_factor *= 25.4 / float(dpi)
use_mm = True
else:
# PCBNew uses "decimil" (10K DPI);
scale_factor *= 10000.0 / float(dpi)
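        # For example, at the default 96 DPI one SVG user unit maps to
        # 25.4 / 96 ~= 0.2646 mm (or 10000 / 96 ~= 104 decimil), on top of
        # any user-supplied scale factor.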
self.imported = svg2mod_import
self.file_name = file_name
self.scale_factor = scale_factor
self.precision = precision
self.use_mm = use_mm
self.dpi = dpi
#------------------------------------------------------------------------
def _calculate_translation( self ):
min_point, max_point = self.imported.svg.bbox()
# Center the drawing:
adjust_x = min_point.x + ( max_point.x - min_point.x ) / 2.0
adjust_y = min_point.y + ( max_point.y - min_point.y ) / 2.0
self.translation = svg.Point(
0.0 - adjust_x,
0.0 - adjust_y,
)
#------------------------------------------------------------------------
# Find and keep only the layers of interest.
def _prune( self, items = None ):
if items is None:
self.layers = {}
for name in self.layer_map.iterkeys():
self.layers[ name ] = None
items = self.imported.svg.items
self.imported.svg.items = []
for item in items:
if not isinstance( item, svg.Group ):
continue
for name in self.layers.iterkeys():
#if re.search( name, item.name, re.I ):
if name == item.name:
print( "Found SVG layer: {}".format( item.name ) )
self.imported.svg.items.append( item )
self.layers[ name ] = item
break
else:
self._prune( item.items )
#------------------------------------------------------------------------
def _write_items( self, items, layer, flip = False ):
for item in items:
if isinstance( item, svg.Group ):
self._write_items( item.items, layer, flip )
continue
elif isinstance( item, svg.Path ):
segments = [
PolygonSegment( segment )
for segment in item.segments(
precision = self.precision
)
]
for segment in segments:
segment.process( self, flip )
                if len( segments ) > 1:
                    points = segments[ 0 ].inline( segments[ 1 : ] )
                elif len( segments ) > 0:
                    points = segments[ 0 ].points
                else:
                    # An empty path produces no segments; skip it so that
                    # "points" is never used uninitialized below.
                    continue
fill, stroke, stroke_width = self._get_fill_stroke( item )
if not self.use_mm:
stroke_width = self._convert_mm_to_decimil(
stroke_width
)
print( " Writing polygon with {} points".format(
len( points ) )
)
self._write_polygon(
points, layer, fill, stroke, stroke_width
)
else:
print( "Unsupported SVG element: {}".format(
item.__class__.__name__
) )
#------------------------------------------------------------------------
def _write_module( self, front ):
module_name = self._get_module_name( front )
min_point, max_point = self.imported.svg.bbox()
min_point = self.transform_point( min_point, flip = False )
max_point = self.transform_point( max_point, flip = False )
label_offset = 1200
label_size = 600
label_pen = 120
if self.use_mm:
label_size = self._convert_decimil_to_mm( label_size )
label_pen = self._convert_decimil_to_mm( label_pen )
reference_y = min_point.y - self._convert_decimil_to_mm( label_offset )
value_y = max_point.y + self._convert_decimil_to_mm( label_offset )
else:
reference_y = min_point.y - label_offset
value_y = max_point.y + label_offset
self._write_module_header(
label_size, label_pen,
reference_y, value_y,
front,
)
for name, group in self.layers.iteritems():
if group is None: continue
layer = self._get_layer_name( name, front )
#print( " Writing layer: {}".format( name ) )
self._write_items( group.items, layer, not front )
self._write_module_footer( front )
#------------------------------------------------------------------------
def _write_polygon_filled( self, points, layer, stroke_width = 0.0 ):
self._write_polygon_header( points, layer )
for point in points:
self._write_polygon_point( point )
self._write_polygon_footer( layer, stroke_width )
#------------------------------------------------------------------------
def _write_polygon_outline( self, points, layer, stroke_width ):
prior_point = None
for point in points:
if prior_point is not None:
self._write_polygon_segment(
prior_point, point, layer, stroke_width
)
prior_point = point
#------------------------------------------------------------------------
def transform_point( self, point, flip = False ):
transformed_point = svg.Point(
( point.x + self.translation.x ) * self.scale_factor,
( point.y + self.translation.y ) * self.scale_factor,
)
if flip:
transformed_point.x *= -1
if self.use_mm:
transformed_point.x = round( transformed_point.x, 12 )
transformed_point.y = round( transformed_point.y, 12 )
else:
transformed_point.x = int( round( transformed_point.x ) )
transformed_point.y = int( round( transformed_point.y ) )
return transformed_point
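    # Sketch of the mapping: with self.translation = (-10, -20) and scale
    # factor s, the point (12, 24) maps to (2*s, 4*s); with flip=True the x
    # coordinate is negated, giving (-2*s, 4*s) for the reverse-side module.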
#------------------------------------------------------------------------
def write( self ):
self._prune()
# Must come after pruning:
        self._calculate_translation()
print( "Writing module file: {}".format( self.file_name ) )
self.output_file = open( self.file_name, 'w' )
self._write_library_intro()
self._write_modules()
self.output_file.close()
self.output_file = None
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class Svg2ModExportLegacy( Svg2ModExport ):
layer_map = {
#'inkscape-name' : [ kicad-front, kicad-back ],
'Cu' : [ 15, 0 ],
'Adhes' : [ 17, 16 ],
'Paste' : [ 19, 18 ],
'SilkS' : [ 21, 20 ],
'Mask' : [ 23, 22 ],
'Dwgs.User' : [ 24, 24 ],
'Cmts.User' : [ 25, 25 ],
'Eco1.User' : [ 26, 26 ],
'Eco2.User' : [ 27, 27 ],
'Edge.Cuts' : [ 28, 28 ],
}
#------------------------------------------------------------------------
def __init__(
self,
svg2mod_import,
file_name,
scale_factor = 1.0,
precision = 20.0,
use_mm = True,
dpi = DEFAULT_DPI,
include_reverse = True,
):
super( Svg2ModExportLegacy, self ).__init__(
svg2mod_import,
file_name,
scale_factor,
precision,
use_mm,
dpi,
)
self.include_reverse = include_reverse
#------------------------------------------------------------------------
def _get_layer_name( self, name, front ):
layer_info = self.layer_map[ name ]
layer = layer_info[ 0 ]
if not front and layer_info[ 1 ] is not None:
layer = layer_info[ 1 ]
return layer
#------------------------------------------------------------------------
def _get_module_name( self, front = None ):
if self.include_reverse and not front:
return self.imported.module_name + "-rev"
return self.imported.module_name
#------------------------------------------------------------------------
def _write_library_intro( self ):
modules_list = self._get_module_name( front = True )
if self.include_reverse:
modules_list += (
"\n" +
self._get_module_name( front = False )
)
units = ""
if self.use_mm:
units = "\nUnits mm"
self.output_file.write( """PCBNEW-LibModule-V1 {0}{1}
$INDEX
{2}
$EndINDEX
#
# {3}
#
""".format(
datetime.datetime.now().strftime( "%a %d %b %Y %I:%M:%S %p %Z" ),
units,
modules_list,
self.imported.file_name,
)
)
#------------------------------------------------------------------------
def _write_module_header(
self,
label_size,
label_pen,
reference_y,
value_y,
front,
):
self.output_file.write( """$MODULE {0}
Po 0 0 0 {6} 00000000 00000000 ~~
Li {0}
T0 0 {1} {2} {2} 0 {3} N I 21 "{0}"
T1 0 {5} {2} {2} 0 {3} N I 21 "{4}"
""".format(
self._get_module_name( front ),
reference_y,
label_size,
label_pen,
self.imported.module_value,
value_y,
15, # Seems necessary
)
)
#------------------------------------------------------------------------
def _write_module_footer( self, front ):
self.output_file.write(
"$EndMODULE {0}\n".format( self._get_module_name( front ) )
)
#------------------------------------------------------------------------
def _write_modules( self ):
self._write_module( front = True )
if self.include_reverse:
self._write_module( front = False )
self.output_file.write( "$EndLIBRARY" )
#------------------------------------------------------------------------
def _write_polygon( self, points, layer, fill, stroke, stroke_width ):
if fill:
self._write_polygon_filled(
points, layer
)
if stroke:
self._write_polygon_outline(
points, layer, stroke_width
)
#------------------------------------------------------------------------
def _write_polygon_footer( self, layer, stroke_width ):
pass
#------------------------------------------------------------------------
def _write_polygon_header( self, points, layer ):
pen = 1
if self.use_mm:
pen = self._convert_decimil_to_mm( pen )
self.output_file.write( "DP 0 0 0 0 {} {} {}\n".format(
len( points ),
pen,
layer
) )
#------------------------------------------------------------------------
def _write_polygon_point( self, point ):
self.output_file.write(
"Dl {} {}\n".format( point.x, point.y )
)
#------------------------------------------------------------------------
def _write_polygon_segment( self, p, q, layer, stroke_width ):
self.output_file.write( "DS {} {} {} {} {} {}\n".format(
p.x, p.y,
q.x, q.y,
stroke_width,
layer
) )
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class Svg2ModExportLegacyUpdater( Svg2ModExportLegacy ):
#------------------------------------------------------------------------
def __init__(
self,
svg2mod_import,
file_name,
scale_factor = 1.0,
precision = 20.0,
dpi = DEFAULT_DPI,
include_reverse = True,
):
self.file_name = file_name
use_mm = self._parse_output_file()
super( Svg2ModExportLegacyUpdater, self ).__init__(
svg2mod_import,
file_name,
scale_factor,
precision,
use_mm,
dpi,
include_reverse,
)
#------------------------------------------------------------------------
def _parse_output_file( self ):
print( "Parsing module file: {}".format( self.file_name ) )
module_file = open( self.file_name, 'r' )
lines = module_file.readlines()
module_file.close()
self.loaded_modules = {}
self.post_index = []
self.pre_index = []
use_mm = False
index = 0
# Find the start of the index:
while index < len( lines ):
line = lines[ index ]
index += 1
self.pre_index.append( line )
if line[ : 6 ] == "$INDEX":
break
m = re.match( "Units[\s]+mm[\s]*", line )
if m is not None:
print( " Use mm detected" )
use_mm = True
# Read the index:
while index < len( lines ):
line = lines[ index ]
if line[ : 9 ] == "$EndINDEX":
break
index += 1
self.loaded_modules[ line.strip() ] = []
# Read up until the first module:
while index < len( lines ):
line = lines[ index ]
if line[ : 7 ] == "$MODULE":
break
index += 1
self.post_index.append( line )
# Read modules:
while index < len( lines ):
line = lines[ index ]
if line[ : 7 ] == "$MODULE":
module_name, module_lines, index = self._read_module( lines, index )
if module_name is not None:
self.loaded_modules[ module_name ] = module_lines
elif line[ : 11 ] == "$EndLIBRARY":
break
else:
raise Exception(
"Expected $EndLIBRARY: [{}]".format( line )
)
#print( "Pre-index:" )
#pprint( self.pre_index )
#print( "Post-index:" )
#pprint( self.post_index )
#print( "Loaded modules:" )
#pprint( self.loaded_modules )
return use_mm
#------------------------------------------------------------------------
def _read_module( self, lines, index ):
# Read module name:
m = re.match( r'\$MODULE[\s]+([^\s]+)[\s]*', lines[ index ] )
module_name = m.group( 1 )
print( " Reading module {}".format( module_name ) )
index += 1
module_lines = []
while index < len( lines ):
line = lines[ index ]
index += 1
m = re.match(
r'\$EndMODULE[\s]+' + module_name + r'[\s]*', line
)
if m is not None:
return module_name, module_lines, index
module_lines.append( line )
raise Exception(
"Could not find end of module '{}'".format( module_name )
)
#------------------------------------------------------------------------
def _write_library_intro( self ):
# Write pre-index:
self.output_file.writelines( self.pre_index )
self.loaded_modules[ self._get_module_name( front = True ) ] = None
if self.include_reverse:
self.loaded_modules[
self._get_module_name( front = False )
] = None
# Write index:
for module_name in sorted(
self.loaded_modules.iterkeys(),
key = str.lower
):
self.output_file.write( module_name + "\n" )
# Write post-index:
self.output_file.writelines( self.post_index )
#------------------------------------------------------------------------
def _write_preserved_modules( self, up_to = None ):
if up_to is not None:
up_to = up_to.lower()
for module_name in sorted(
self.loaded_modules.iterkeys(),
key = str.lower
):
if up_to is not None and module_name.lower() >= up_to:
continue
module_lines = self.loaded_modules[ module_name ]
if module_lines is not None:
self.output_file.write(
"$MODULE {}\n".format( module_name )
)
self.output_file.writelines( module_lines )
self.output_file.write(
"$EndMODULE {}\n".format( module_name )
)
self.loaded_modules[ module_name ] = None
#------------------------------------------------------------------------
def _write_module_footer( self, front ):
super( Svg2ModExportLegacyUpdater, self )._write_module_footer(
front,
)
# Write remaining modules:
if not front:
self._write_preserved_modules()
#------------------------------------------------------------------------
def _write_module_header(
self,
label_size,
label_pen,
reference_y,
value_y,
front,
):
self._write_preserved_modules(
up_to = self._get_module_name( front )
)
super( Svg2ModExportLegacyUpdater, self )._write_module_header(
label_size,
label_pen,
reference_y,
value_y,
front,
)
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class Svg2ModExportPretty( Svg2ModExport ):
layer_map = {
#'inkscape-name' : kicad-name,
'Cu' : "{}.Cu",
'Adhes' : "{}.Adhes",
'Paste' : "{}.Paste",
'SilkS' : "{}.SilkS",
'Mask' : "{}.Mask",
'CrtYd' : "{}.CrtYd",
'Fab' : "{}.Fab",
'Edge.Cuts' : "Edge.Cuts"
}
#------------------------------------------------------------------------
def _get_layer_name( self, name, front ):
if front:
return self.layer_map[ name ].format("F")
else:
return self.layer_map[ name ].format("B")
#------------------------------------------------------------------------
def _get_module_name( self, front = None ):
return self.imported.module_name
#------------------------------------------------------------------------
def _write_library_intro( self ):
self.output_file.write( """(module {0} (layer F.Cu) (tedit {1:8X})
(attr smd)
(descr "{2}")
(tags {3})
""".format(
self.imported.module_name, #0
int( round( os.path.getctime( #1
self.imported.file_name
) ) ),
"Imported from {}".format( self.imported.file_name ), #2
"svg2mod", #3
)
)
#------------------------------------------------------------------------
def _write_module_footer( self, front ):
self.output_file.write( "\n)" )
#------------------------------------------------------------------------
def _write_module_header(
self,
label_size,
label_pen,
reference_y,
value_y,
front,
):
if front:
side = "F"
else:
side = "B"
self.output_file.write(
""" (fp_text reference {0} (at 0 {1}) (layer {2}.SilkS) hide
(effects (font (size {3} {3}) (thickness {4})))
)
(fp_text value {5} (at 0 {6}) (layer {2}.SilkS) hide
(effects (font (size {3} {3}) (thickness {4})))
)""".format(
self._get_module_name(), #0
reference_y, #1
side, #2
label_size, #3
label_pen, #4
self.imported.module_value, #5
value_y, #6
)
)
#------------------------------------------------------------------------
def _write_modules( self ):
self._write_module( front = True )
#------------------------------------------------------------------------
def _write_polygon( self, points, layer, fill, stroke, stroke_width ):
if fill:
self._write_polygon_filled(
points, layer, stroke_width
)
# Polygons with a fill and stroke are drawn with the filled polygon
# above:
if stroke and not fill:
self._write_polygon_outline(
points, layer, stroke_width
)
#------------------------------------------------------------------------
def _write_polygon_footer( self, layer, stroke_width ):
self.output_file.write(
" )\n (layer {})\n (width {})\n )".format(
layer, stroke_width
)
)
#------------------------------------------------------------------------
def _write_polygon_header( self, points, layer ):
self.output_file.write( "\n (fp_poly\n (pts \n" )
#------------------------------------------------------------------------
def _write_polygon_point( self, point ):
self.output_file.write(
" (xy {} {})\n".format( point.x, point.y )
)
#------------------------------------------------------------------------
def _write_polygon_segment( self, p, q, layer, stroke_width ):
self.output_file.write(
"""\n (fp_line
(start {} {})
(end {} {})
(layer {})
(width {})
)""".format(
p.x, p.y,
q.x, q.y,
layer,
stroke_width,
)
)
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
def get_arguments():
parser = argparse.ArgumentParser(
description = (
'Convert Inkscape SVG drawings to KiCad footprint modules.'
)
)
#------------------------------------------------------------------------
parser.add_argument(
'-i', '--input-file',
type = str,
dest = 'input_file_name',
metavar = 'FILENAME',
help = "name of the SVG file",
required = True,
)
parser.add_argument(
'-o', '--output-file',
type = str,
dest = 'output_file_name',
metavar = 'FILENAME',
help = "name of the module file",
)
parser.add_argument(
'--name', '--module-name',
type = str,
dest = 'module_name',
metavar = 'NAME',
help = "base name of the module",
default = "svg2mod",
)
parser.add_argument(
'--value', '--module-value',
type = str,
dest = 'module_value',
metavar = 'VALUE',
help = "value of the module",
default = "G***",
)
parser.add_argument(
'-f', '--factor',
type = float,
dest = 'scale_factor',
metavar = 'FACTOR',
help = "scale paths by this factor",
default = 1.0,
)
parser.add_argument(
'-p', '--precision',
type = float,
dest = 'precision',
metavar = 'PRECISION',
help = "smoothness for approximating curves with line segments (float)",
default = 10.0,
)
parser.add_argument(
'--front-only',
dest = 'front_only',
action = 'store_const',
const = True,
help = "omit output of back module (legacy output format)",
default = False,
)
parser.add_argument(
'--format',
type = str,
dest = 'format',
metavar = 'FORMAT',
choices = [ 'legacy', 'pretty' ],
help = "output module file format (legacy|pretty)",
default = 'pretty',
)
parser.add_argument(
'--units',
type = str,
dest = 'units',
metavar = 'UNITS',
choices = [ 'decimil', 'mm' ],
help = "output units, if output format is legacy (decimil|mm)",
default = 'mm',
)
parser.add_argument(
'-d', '--dpi',
type = int,
dest = 'dpi',
metavar = 'DPI',
help = "DPI of the SVG file (int)",
default = DEFAULT_DPI,
)
return parser.parse_args(), parser
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
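# Hedged usage sketch (not from the original source; file names and option
# values are hypothetical): assuming this module is saved as "svg2mod.py",
# the argument parser defined above could be exercised with a shell command
# such as
#
#   python svg2mod.py -i logo.svg -o logo.kicad_mod --name LOGO \
#       --format pretty --factor 0.5 --precision 5.0 --dpi 96
#
# which converts logo.svg into a "pretty" format footprint named LOGO.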
main()
#----------------------------------------------------------------------------
# vi: set et sts=4 sw=4 ts=4:
| cc0-1.0 | 9,195,613,608,947,256,000 | 26.103851 | 97 | 0.414905 | false | 4.459545 | false | false | false |
mikehulluk/morphforge | src/morphforge/constants/ions.py | 1 | 1668 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
class ChlIon(object):
Na = 'na'
Ks = 'ks'
Kf = 'kf'
Ca = 'ca'
Lk = 'lk'
Chls = [Na, Ks, Kf, Ca, Lk]
| bsd-2-clause | 4,506,075,776,726,834,700 | 37.790698 | 72 | 0.67446 | false | 4.448 | false | false | false |
paninetworks/neutron | neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py | 1 | 46084 | #!/usr/bin/env python
# Copyright 2012 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Performs per host Linux Bridge configuration for Neutron.
# Based on the structure of the OpenVSwitch agent in the
# Neutron OpenVSwitch Plugin.
import os
import sys
import time
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_service import service
from six import moves
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.l2pop.rpc_manager \
import l2population_rpc as l2pop_rpc
from neutron.plugins.ml2.drivers.linuxbridge.agent import arp_protect
from neutron.plugins.ml2.drivers.linuxbridge.agent.common import config # noqa
from neutron.plugins.ml2.drivers.linuxbridge.agent.common \
import constants as lconst
LOG = logging.getLogger(__name__)
BRIDGE_NAME_PREFIX = "brq"
# NOTE(toabctl): Don't use /sys/devices/virtual/net here because not all tap
# devices are listed here (i.e. when using Xen)
BRIDGE_FS = "/sys/class/net/"
BRIDGE_NAME_PLACEHOLDER = "bridge_name"
BRIDGE_INTERFACES_FS = BRIDGE_FS + BRIDGE_NAME_PLACEHOLDER + "/brif/"
DEVICE_NAME_PLACEHOLDER = "device_name"
BRIDGE_PORT_FS_FOR_DEVICE = BRIDGE_FS + DEVICE_NAME_PLACEHOLDER + "/brport"
VXLAN_INTERFACE_PREFIX = "vxlan-"
class NetworkSegment(object):
def __init__(self, network_type, physical_network, segmentation_id):
self.network_type = network_type
self.physical_network = physical_network
self.segmentation_id = segmentation_id
class LinuxBridgeManager(object):
def __init__(self, interface_mappings):
self.interface_mappings = interface_mappings
self.ip = ip_lib.IPWrapper()
# VXLAN related parameters:
self.local_ip = cfg.CONF.VXLAN.local_ip
self.vxlan_mode = lconst.VXLAN_NONE
if cfg.CONF.VXLAN.enable_vxlan:
device = self.ip.get_device_by_ip(self.local_ip)
if device:
self.local_int = device.name
self.check_vxlan_support()
else:
self.local_int = None
LOG.warning(_LW('VXLAN is enabled, a valid local_ip '
'must be provided'))
# Store network mapping to segments
self.network_map = {}
def interface_exists_on_bridge(self, bridge, interface):
directory = '/sys/class/net/%s/brif' % bridge
for filename in os.listdir(directory):
if filename == interface:
return True
return False
def get_bridge_name(self, network_id):
if not network_id:
LOG.warning(_LW("Invalid Network ID, will lead to incorrect "
"bridge name"))
bridge_name = BRIDGE_NAME_PREFIX + network_id[0:11]
return bridge_name
def get_subinterface_name(self, physical_interface, vlan_id):
if not vlan_id:
LOG.warning(_LW("Invalid VLAN ID, will lead to incorrect "
"subinterface name"))
subinterface_name = '%s.%s' % (physical_interface, vlan_id)
return subinterface_name
def get_tap_device_name(self, interface_id):
if not interface_id:
LOG.warning(_LW("Invalid Interface ID, will lead to incorrect "
"tap device name"))
tap_device_name = constants.TAP_DEVICE_PREFIX + interface_id[0:11]
return tap_device_name
def get_vxlan_device_name(self, segmentation_id):
if 0 <= int(segmentation_id) <= p_const.MAX_VXLAN_VNI:
return VXLAN_INTERFACE_PREFIX + str(segmentation_id)
else:
LOG.warning(_LW("Invalid Segmentation ID: %s, will lead to "
"incorrect vxlan device name"), segmentation_id)
def get_all_neutron_bridges(self):
neutron_bridge_list = []
bridge_list = os.listdir(BRIDGE_FS)
for bridge in bridge_list:
if bridge.startswith(BRIDGE_NAME_PREFIX):
neutron_bridge_list.append(bridge)
return neutron_bridge_list
def get_interfaces_on_bridge(self, bridge_name):
if ip_lib.device_exists(bridge_name):
bridge_interface_path = BRIDGE_INTERFACES_FS.replace(
BRIDGE_NAME_PLACEHOLDER, bridge_name)
return os.listdir(bridge_interface_path)
else:
return []
def get_tap_devices_count(self, bridge_name):
bridge_interface_path = BRIDGE_INTERFACES_FS.replace(
BRIDGE_NAME_PLACEHOLDER, bridge_name)
try:
if_list = os.listdir(bridge_interface_path)
return len([interface for interface in if_list if
interface.startswith(constants.TAP_DEVICE_PREFIX)])
except OSError:
return 0
def get_bridge_for_tap_device(self, tap_device_name):
bridges = self.get_all_neutron_bridges()
for bridge in bridges:
interfaces = self.get_interfaces_on_bridge(bridge)
if tap_device_name in interfaces:
return bridge
return None
def is_device_on_bridge(self, device_name):
if not device_name:
return False
else:
bridge_port_path = BRIDGE_PORT_FS_FOR_DEVICE.replace(
DEVICE_NAME_PLACEHOLDER, device_name)
return os.path.exists(bridge_port_path)
def ensure_vlan_bridge(self, network_id, physical_interface, vlan_id):
"""Create a vlan and bridge unless they already exist."""
interface = self.ensure_vlan(physical_interface, vlan_id)
bridge_name = self.get_bridge_name(network_id)
ips, gateway = self.get_interface_details(interface)
if self.ensure_bridge(bridge_name, interface, ips, gateway):
return interface
def ensure_vxlan_bridge(self, network_id, segmentation_id):
"""Create a vxlan and bridge unless they already exist."""
interface = self.ensure_vxlan(segmentation_id)
if not interface:
LOG.error(_LE("Failed creating vxlan interface for "
"%(segmentation_id)s"),
                      {'segmentation_id': segmentation_id})
return
bridge_name = self.get_bridge_name(network_id)
self.ensure_bridge(bridge_name, interface)
return interface
def get_interface_details(self, interface):
device = self.ip.device(interface)
ips = device.addr.list(scope='global')
# Update default gateway if necessary
gateway = device.route.get_gateway(scope='global')
return ips, gateway
def ensure_flat_bridge(self, network_id, physical_interface):
"""Create a non-vlan bridge unless it already exists."""
bridge_name = self.get_bridge_name(network_id)
ips, gateway = self.get_interface_details(physical_interface)
if self.ensure_bridge(bridge_name, physical_interface, ips, gateway):
return physical_interface
def ensure_local_bridge(self, network_id):
"""Create a local bridge unless it already exists."""
bridge_name = self.get_bridge_name(network_id)
return self.ensure_bridge(bridge_name)
def ensure_vlan(self, physical_interface, vlan_id):
"""Create a vlan unless it already exists."""
interface = self.get_subinterface_name(physical_interface, vlan_id)
if not ip_lib.device_exists(interface):
LOG.debug("Creating subinterface %(interface)s for "
"VLAN %(vlan_id)s on interface "
"%(physical_interface)s",
{'interface': interface, 'vlan_id': vlan_id,
'physical_interface': physical_interface})
if utils.execute(['ip', 'link', 'add', 'link',
physical_interface,
'name', interface, 'type', 'vlan', 'id',
vlan_id], run_as_root=True):
return
if utils.execute(['ip', 'link', 'set',
interface, 'up'], run_as_root=True):
return
LOG.debug("Done creating subinterface %s", interface)
return interface
def ensure_vxlan(self, segmentation_id):
"""Create a vxlan unless it already exists."""
interface = self.get_vxlan_device_name(segmentation_id)
if not ip_lib.device_exists(interface):
LOG.debug("Creating vxlan interface %(interface)s for "
"VNI %(segmentation_id)s",
{'interface': interface,
'segmentation_id': segmentation_id})
args = {'dev': self.local_int}
if self.vxlan_mode == lconst.VXLAN_MCAST:
args['group'] = cfg.CONF.VXLAN.vxlan_group
if cfg.CONF.VXLAN.ttl:
args['ttl'] = cfg.CONF.VXLAN.ttl
if cfg.CONF.VXLAN.tos:
args['tos'] = cfg.CONF.VXLAN.tos
if cfg.CONF.VXLAN.l2_population:
args['proxy'] = True
int_vxlan = self.ip.add_vxlan(interface, segmentation_id, **args)
int_vxlan.link.set_up()
LOG.debug("Done creating vxlan interface %s", interface)
return interface
def update_interface_ip_details(self, destination, source, ips,
gateway):
if ips or gateway:
dst_device = self.ip.device(destination)
src_device = self.ip.device(source)
# Append IP's to bridge if necessary
if ips:
for ip in ips:
dst_device.addr.add(cidr=ip['cidr'])
if gateway:
# Ensure that the gateway can be updated by changing the metric
metric = 100
if 'metric' in gateway:
metric = gateway['metric'] - 1
dst_device.route.add_gateway(gateway=gateway['gateway'],
metric=metric)
src_device.route.delete_gateway(gateway=gateway['gateway'])
# Remove IP's from interface
if ips:
for ip in ips:
src_device.addr.delete(cidr=ip['cidr'])
def _bridge_exists_and_ensure_up(self, bridge_name):
"""Check if the bridge exists and make sure it is up."""
br = ip_lib.IPDevice(bridge_name)
br.set_log_fail_as_error(False)
try:
# If the device doesn't exist this will throw a RuntimeError
br.link.set_up()
except RuntimeError:
return False
return True
def ensure_bridge(self, bridge_name, interface=None, ips=None,
gateway=None):
"""Create a bridge unless it already exists."""
# _bridge_exists_and_ensure_up instead of device_exists is used here
# because there are cases where the bridge exists but it's not UP,
# for example:
# 1) A greenthread was executing this function and had not yet executed
# "ip link set bridge_name up" before eventlet switched to this
# thread running the same function
# 2) The Nova VIF driver was running concurrently and had just created
# the bridge, but had not yet put it UP
if not self._bridge_exists_and_ensure_up(bridge_name):
LOG.debug("Starting bridge %(bridge_name)s for subinterface "
"%(interface)s",
{'bridge_name': bridge_name, 'interface': interface})
bridge_device = bridge_lib.BridgeDevice.addbr(bridge_name)
if bridge_device.setfd(0):
return
if bridge_device.disable_stp():
return
if bridge_device.link.set_up():
return
LOG.debug("Done starting bridge %(bridge_name)s for "
"subinterface %(interface)s",
{'bridge_name': bridge_name, 'interface': interface})
else:
bridge_device = bridge_lib.BridgeDevice(bridge_name)
if not interface:
return bridge_name
# Update IP info if necessary
self.update_interface_ip_details(bridge_name, interface, ips, gateway)
# Check if the interface is part of the bridge
if not self.interface_exists_on_bridge(bridge_name, interface):
try:
# Check if the interface is not enslaved in another bridge
if self.is_device_on_bridge(interface):
bridge = self.get_bridge_for_tap_device(interface)
bridge_lib.BridgeDevice(bridge).delif(interface)
bridge_device.addif(interface)
except Exception as e:
LOG.error(_LE("Unable to add %(interface)s to %(bridge_name)s"
"! Exception: %(e)s"),
{'interface': interface, 'bridge_name': bridge_name,
'e': e})
return
return bridge_name
def ensure_physical_in_bridge(self, network_id,
network_type,
physical_network,
segmentation_id):
if network_type == p_const.TYPE_VXLAN:
if self.vxlan_mode == lconst.VXLAN_NONE:
LOG.error(_LE("Unable to add vxlan interface for network %s"),
network_id)
return
return self.ensure_vxlan_bridge(network_id, segmentation_id)
physical_interface = self.interface_mappings.get(physical_network)
if not physical_interface:
LOG.error(_LE("No mapping for physical network %s"),
physical_network)
return
if network_type == p_const.TYPE_FLAT:
return self.ensure_flat_bridge(network_id, physical_interface)
elif network_type == p_const.TYPE_VLAN:
return self.ensure_vlan_bridge(network_id, physical_interface,
segmentation_id)
else:
LOG.error(_LE("Unknown network_type %(network_type)s for network "
"%(network_id)s."), {network_type: network_type,
network_id: network_id})
def add_tap_interface(self, network_id, network_type, physical_network,
segmentation_id, tap_device_name):
"""Add tap interface.
If a VIF has been plugged into a network, this function will
add the corresponding tap device to the relevant bridge.
"""
if not ip_lib.device_exists(tap_device_name):
LOG.debug("Tap device: %s does not exist on "
"this host, skipped", tap_device_name)
return False
bridge_name = self.get_bridge_name(network_id)
if network_type == p_const.TYPE_LOCAL:
self.ensure_local_bridge(network_id)
else:
phy_dev_name = self.ensure_physical_in_bridge(network_id,
network_type,
physical_network,
segmentation_id)
if not phy_dev_name:
return False
self.ensure_tap_mtu(tap_device_name, phy_dev_name)
# Check if device needs to be added to bridge
tap_device_in_bridge = self.get_bridge_for_tap_device(tap_device_name)
if not tap_device_in_bridge:
data = {'tap_device_name': tap_device_name,
'bridge_name': bridge_name}
LOG.debug("Adding device %(tap_device_name)s to bridge "
"%(bridge_name)s", data)
if bridge_lib.BridgeDevice(bridge_name).addif(tap_device_name):
return False
else:
data = {'tap_device_name': tap_device_name,
'bridge_name': bridge_name}
LOG.debug("%(tap_device_name)s already exists on bridge "
"%(bridge_name)s", data)
return True
def ensure_tap_mtu(self, tap_dev_name, phy_dev_name):
"""Ensure the MTU on the tap is the same as the physical device."""
phy_dev_mtu = ip_lib.IPDevice(phy_dev_name).link.mtu
ip_lib.IPDevice(tap_dev_name).link.set_mtu(phy_dev_mtu)
def add_interface(self, network_id, network_type, physical_network,
segmentation_id, port_id):
self.network_map[network_id] = NetworkSegment(network_type,
physical_network,
segmentation_id)
tap_device_name = self.get_tap_device_name(port_id)
return self.add_tap_interface(network_id, network_type,
physical_network, segmentation_id,
tap_device_name)
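    # Hedged usage sketch (hypothetical IDs and interface mapping, not taken
    # from the original source):
    #
    #   mgr = LinuxBridgeManager({'physnet1': 'eth1'})
    #   mgr.add_interface('0a1b2c3d-4e5f-6789-abcd-ef0123456789',
    #                     p_const.TYPE_VLAN, 'physnet1', 101,
    #                     'f47ac10b-58cc-4372-a567-0e02b2c3d479')
    #
    # would ensure subinterface "eth1.101" and bridge "brq0a1b2c3d-4e", then
    # add the port's tap device to that bridge if the tap exists on this host.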
def delete_vlan_bridge(self, bridge_name):
if ip_lib.device_exists(bridge_name):
interfaces_on_bridge = self.get_interfaces_on_bridge(bridge_name)
for interface in interfaces_on_bridge:
self.remove_interface(bridge_name, interface)
if interface.startswith(VXLAN_INTERFACE_PREFIX):
self.delete_vxlan(interface)
continue
for physical_interface in self.interface_mappings.values():
if (interface.startswith(physical_interface)):
ips, gateway = self.get_interface_details(bridge_name)
if ips:
# This is a flat network or a VLAN interface that
# was setup outside of neutron => return IP's from
# bridge to interface
self.update_interface_ip_details(interface,
bridge_name,
ips, gateway)
elif physical_interface != interface:
self.delete_vlan(interface)
LOG.debug("Deleting bridge %s", bridge_name)
bridge_device = bridge_lib.BridgeDevice(bridge_name)
if bridge_device.link.set_down():
return
if bridge_device.delbr():
return
LOG.debug("Done deleting bridge %s", bridge_name)
else:
LOG.error(_LE("Cannot delete bridge %s, does not exist"),
bridge_name)
def remove_empty_bridges(self):
for network_id in list(self.network_map.keys()):
bridge_name = self.get_bridge_name(network_id)
if not self.get_tap_devices_count(bridge_name):
self.delete_vlan_bridge(bridge_name)
del self.network_map[network_id]
def remove_interface(self, bridge_name, interface_name):
if ip_lib.device_exists(bridge_name):
if not self.is_device_on_bridge(interface_name):
return True
LOG.debug("Removing device %(interface_name)s from bridge "
"%(bridge_name)s",
{'interface_name': interface_name,
'bridge_name': bridge_name})
if bridge_lib.BridgeDevice(bridge_name).delif(interface_name):
return False
LOG.debug("Done removing device %(interface_name)s from bridge "
"%(bridge_name)s",
{'interface_name': interface_name,
'bridge_name': bridge_name})
return True
else:
LOG.debug("Cannot remove device %(interface_name)s bridge "
"%(bridge_name)s does not exist",
{'interface_name': interface_name,
'bridge_name': bridge_name})
return False
def delete_vlan(self, interface):
if ip_lib.device_exists(interface):
LOG.debug("Deleting subinterface %s for vlan", interface)
if utils.execute(['ip', 'link', 'set', interface, 'down'],
run_as_root=True):
return
if utils.execute(['ip', 'link', 'delete', interface],
run_as_root=True):
return
LOG.debug("Done deleting subinterface %s", interface)
def delete_vxlan(self, interface):
if ip_lib.device_exists(interface):
LOG.debug("Deleting vxlan interface %s for vlan",
interface)
int_vxlan = self.ip.device(interface)
int_vxlan.link.set_down()
int_vxlan.link.delete()
LOG.debug("Done deleting vxlan interface %s", interface)
def get_tap_devices(self):
devices = set()
for device in os.listdir(BRIDGE_FS):
if device.startswith(constants.TAP_DEVICE_PREFIX):
devices.add(device)
return devices
def vxlan_ucast_supported(self):
if not cfg.CONF.VXLAN.l2_population:
return False
if not ip_lib.iproute_arg_supported(
['bridge', 'fdb'], 'append'):
LOG.warning(_LW('Option "%(option)s" must be supported by command '
'"%(command)s" to enable %(mode)s mode'),
{'option': 'append',
'command': 'bridge fdb',
'mode': 'VXLAN UCAST'})
return False
test_iface = None
for seg_id in moves.range(1, p_const.MAX_VXLAN_VNI + 1):
if not ip_lib.device_exists(
self.get_vxlan_device_name(seg_id)):
test_iface = self.ensure_vxlan(seg_id)
break
else:
LOG.error(_LE('No valid Segmentation ID to perform UCAST test.'))
return False
try:
utils.execute(
cmd=['bridge', 'fdb', 'append', constants.FLOODING_ENTRY[0],
'dev', test_iface, 'dst', '1.1.1.1'],
run_as_root=True, log_fail_as_error=False)
return True
except RuntimeError:
return False
finally:
self.delete_vxlan(test_iface)
def vxlan_mcast_supported(self):
if not cfg.CONF.VXLAN.vxlan_group:
            LOG.warning(_LW('VXLAN multicast group must be provided in '
'vxlan_group option to enable VXLAN MCAST mode'))
return False
if not ip_lib.iproute_arg_supported(
['ip', 'link', 'add', 'type', 'vxlan'],
'proxy'):
LOG.warning(_LW('Option "%(option)s" must be supported by command '
'"%(command)s" to enable %(mode)s mode'),
{'option': 'proxy',
'command': 'ip link add type vxlan',
'mode': 'VXLAN MCAST'})
return False
return True
def check_vxlan_support(self):
self.vxlan_mode = lconst.VXLAN_NONE
if self.vxlan_ucast_supported():
self.vxlan_mode = lconst.VXLAN_UCAST
elif self.vxlan_mcast_supported():
self.vxlan_mode = lconst.VXLAN_MCAST
else:
raise exceptions.VxlanNetworkUnsupported()
LOG.debug('Using %s VXLAN mode', self.vxlan_mode)
def fdb_ip_entry_exists(self, mac, ip, interface):
entries = utils.execute(['ip', 'neigh', 'show', 'to', ip,
'dev', interface],
run_as_root=True)
return mac in entries
def fdb_bridge_entry_exists(self, mac, interface, agent_ip=None):
entries = utils.execute(['bridge', 'fdb', 'show', 'dev', interface],
run_as_root=True)
if not agent_ip:
return mac in entries
return (agent_ip in entries and mac in entries)
def add_fdb_ip_entry(self, mac, ip, interface):
utils.execute(['ip', 'neigh', 'replace', ip, 'lladdr', mac,
'dev', interface, 'nud', 'permanent'],
run_as_root=True,
check_exit_code=False)
def remove_fdb_ip_entry(self, mac, ip, interface):
utils.execute(['ip', 'neigh', 'del', ip, 'lladdr', mac,
'dev', interface],
run_as_root=True,
check_exit_code=False)
def add_fdb_bridge_entry(self, mac, agent_ip, interface, operation="add"):
utils.execute(['bridge', 'fdb', operation, mac, 'dev', interface,
'dst', agent_ip],
run_as_root=True,
check_exit_code=False)
def remove_fdb_bridge_entry(self, mac, agent_ip, interface):
utils.execute(['bridge', 'fdb', 'del', mac, 'dev', interface,
'dst', agent_ip],
run_as_root=True,
check_exit_code=False)
def add_fdb_entries(self, agent_ip, ports, interface):
for mac, ip in ports:
if mac != constants.FLOODING_ENTRY[0]:
self.add_fdb_ip_entry(mac, ip, interface)
self.add_fdb_bridge_entry(mac, agent_ip, interface,
operation="replace")
elif self.vxlan_mode == lconst.VXLAN_UCAST:
if self.fdb_bridge_entry_exists(mac, interface):
self.add_fdb_bridge_entry(mac, agent_ip, interface,
"append")
else:
self.add_fdb_bridge_entry(mac, agent_ip, interface)
def remove_fdb_entries(self, agent_ip, ports, interface):
for mac, ip in ports:
if mac != constants.FLOODING_ENTRY[0]:
self.remove_fdb_ip_entry(mac, ip, interface)
self.remove_fdb_bridge_entry(mac, agent_ip, interface)
elif self.vxlan_mode == lconst.VXLAN_UCAST:
self.remove_fdb_bridge_entry(mac, agent_ip, interface)
class LinuxBridgeRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
l2pop_rpc.L2populationRpcCallBackMixin):
# Set RPC API version to 1.0 by default.
# history
# 1.1 Support Security Group RPC
# 1.3 Added param devices_to_update to security_groups_provider_updated
target = oslo_messaging.Target(version='1.3')
def __init__(self, context, agent, sg_agent):
super(LinuxBridgeRpcCallbacks, self).__init__()
self.context = context
self.agent = agent
self.sg_agent = sg_agent
def network_delete(self, context, **kwargs):
LOG.debug("network_delete received")
network_id = kwargs.get('network_id')
bridge_name = self.agent.br_mgr.get_bridge_name(network_id)
LOG.debug("Delete %s", bridge_name)
self.agent.br_mgr.delete_vlan_bridge(bridge_name)
def port_update(self, context, **kwargs):
port_id = kwargs['port']['id']
tap_name = self.agent.br_mgr.get_tap_device_name(port_id)
# Put the tap name in the updated_devices set.
# Do not store port details, as if they're used for processing
# notifications there is no guarantee the notifications are
# processed in the same order as the relevant API requests.
self.agent.updated_devices.add(tap_name)
LOG.debug("port_update RPC received for port: %s", port_id)
def fdb_add(self, context, fdb_entries):
LOG.debug("fdb_add received")
for network_id, values in fdb_entries.items():
segment = self.agent.br_mgr.network_map.get(network_id)
if not segment:
return
if segment.network_type != p_const.TYPE_VXLAN:
return
interface = self.agent.br_mgr.get_vxlan_device_name(
segment.segmentation_id)
agent_ports = values.get('ports')
for agent_ip, ports in agent_ports.items():
if agent_ip == self.agent.br_mgr.local_ip:
continue
self.agent.br_mgr.add_fdb_entries(agent_ip,
ports,
interface)
def fdb_remove(self, context, fdb_entries):
LOG.debug("fdb_remove received")
for network_id, values in fdb_entries.items():
segment = self.agent.br_mgr.network_map.get(network_id)
if not segment:
return
if segment.network_type != p_const.TYPE_VXLAN:
return
interface = self.agent.br_mgr.get_vxlan_device_name(
segment.segmentation_id)
agent_ports = values.get('ports')
for agent_ip, ports in agent_ports.items():
if agent_ip == self.agent.br_mgr.local_ip:
continue
self.agent.br_mgr.remove_fdb_entries(agent_ip,
ports,
interface)
def _fdb_chg_ip(self, context, fdb_entries):
LOG.debug("update chg_ip received")
for network_id, agent_ports in fdb_entries.items():
segment = self.agent.br_mgr.network_map.get(network_id)
if not segment:
return
if segment.network_type != p_const.TYPE_VXLAN:
return
interface = self.agent.br_mgr.get_vxlan_device_name(
segment.segmentation_id)
for agent_ip, state in agent_ports.items():
if agent_ip == self.agent.br_mgr.local_ip:
continue
after = state.get('after', [])
for mac, ip in after:
self.agent.br_mgr.add_fdb_ip_entry(mac, ip, interface)
before = state.get('before', [])
for mac, ip in before:
self.agent.br_mgr.remove_fdb_ip_entry(mac, ip, interface)
def fdb_update(self, context, fdb_entries):
LOG.debug("fdb_update received")
for action, values in fdb_entries.items():
method = '_fdb_' + action
if not hasattr(self, method):
raise NotImplementedError()
getattr(self, method)(context, values)
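    # Hedged sketch of the l2population payload consumed by fdb_add/fdb_remove
    # above, reconstructed from the parsing code (all values hypothetical):
    #
    #   fdb_entries = {
    #       '<network-uuid>': {
    #           'ports': {
    #               '192.0.2.10': [
    #                   constants.FLOODING_ENTRY,
    #                   ('fa:16:3e:aa:bb:cc', '10.0.0.5'),
    #               ],
    #           },
    #       },
    #   }
    #
    # Entries for the agent's own local_ip are skipped; the rest are turned
    # into bridge fdb and ip neighbour entries on the vxlan interface.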
class LinuxBridgeNeutronAgentRPC(service.Service):
def __init__(self, interface_mappings, polling_interval,
quitting_rpc_timeout):
"""Constructor.
:param interface_mappings: dict mapping physical_networks to
physical_interfaces.
:param polling_interval: interval (secs) to poll DB.
:param quitting_rpc_timeout: timeout in seconds for rpc calls after
stop is called.
"""
super(LinuxBridgeNeutronAgentRPC, self).__init__()
self.interface_mappings = interface_mappings
self.polling_interval = polling_interval
self.quitting_rpc_timeout = quitting_rpc_timeout
def start(self):
self.prevent_arp_spoofing = cfg.CONF.AGENT.prevent_arp_spoofing
self.setup_linux_bridge(self.interface_mappings)
configurations = {'interface_mappings': self.interface_mappings}
if self.br_mgr.vxlan_mode != lconst.VXLAN_NONE:
configurations['tunneling_ip'] = self.br_mgr.local_ip
configurations['tunnel_types'] = [p_const.TYPE_VXLAN]
configurations['l2_population'] = cfg.CONF.VXLAN.l2_population
self.agent_state = {
'binary': 'neutron-linuxbridge-agent',
'host': cfg.CONF.host,
'topic': constants.L2_AGENT_TOPIC,
'configurations': configurations,
'agent_type': constants.AGENT_TYPE_LINUXBRIDGE,
'start_flag': True}
# stores received port_updates for processing by the main loop
self.updated_devices = set()
self.context = context.get_admin_context_without_session()
self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
self.sg_plugin_rpc, defer_refresh_firewall=True)
self.setup_rpc(self.interface_mappings.values())
self.daemon_loop()
def stop(self, graceful=True):
LOG.info(_LI("Stopping linuxbridge agent."))
if graceful and self.quitting_rpc_timeout:
self.set_rpc_timeout(self.quitting_rpc_timeout)
super(LinuxBridgeNeutronAgentRPC, self).stop(graceful)
def reset(self):
common_config.setup_logging()
def _report_state(self):
try:
devices = len(self.br_mgr.get_tap_devices())
self.agent_state.get('configurations')['devices'] = devices
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_LE("Failed reporting state!"))
def setup_rpc(self, physical_interfaces):
if physical_interfaces:
mac = utils.get_interface_mac(physical_interfaces[0])
else:
devices = ip_lib.IPWrapper().get_devices(True)
if devices:
mac = utils.get_interface_mac(devices[0].name)
else:
LOG.error(_LE("Unable to obtain MAC address for unique ID. "
"Agent terminated!"))
exit(1)
self.agent_id = '%s%s' % ('lb', (mac.replace(":", "")))
LOG.info(_LI("RPC agent_id: %s"), self.agent_id)
self.topic = topics.AGENT
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
# RPC network init
# Handle updates from service
self.endpoints = [LinuxBridgeRpcCallbacks(self.context, self,
self.sg_agent)]
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE]]
if cfg.CONF.VXLAN.l2_population:
consumers.append([topics.L2POPULATION,
topics.UPDATE, cfg.CONF.host])
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers)
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def setup_linux_bridge(self, interface_mappings):
self.br_mgr = LinuxBridgeManager(interface_mappings)
def remove_port_binding(self, network_id, interface_id):
bridge_name = self.br_mgr.get_bridge_name(network_id)
tap_device_name = self.br_mgr.get_tap_device_name(interface_id)
return self.br_mgr.remove_interface(bridge_name, tap_device_name)
def process_network_devices(self, device_info):
resync_a = False
resync_b = False
self.sg_agent.setup_port_filters(device_info.get('added'),
device_info.get('updated'))
# Updated devices are processed the same as new ones, as their
# admin_state_up may have changed. The set union prevents duplicating
# work when a device is new and updated in the same polling iteration.
devices_added_updated = (set(device_info.get('added'))
| set(device_info.get('updated')))
if devices_added_updated:
resync_a = self.treat_devices_added_updated(devices_added_updated)
if device_info.get('removed'):
resync_b = self.treat_devices_removed(device_info['removed'])
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def treat_devices_added_updated(self, devices):
try:
devices_details_list = self.plugin_rpc.get_devices_details_list(
self.context, devices, self.agent_id)
except Exception as e:
LOG.debug("Unable to get port details for "
"%(devices)s: %(e)s",
{'devices': devices, 'e': e})
# resync is needed
return True
for device_details in devices_details_list:
device = device_details['device']
LOG.debug("Port %s added", device)
if 'port_id' in device_details:
LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
{'device': device, 'details': device_details})
if self.prevent_arp_spoofing:
port = self.br_mgr.get_tap_device_name(
device_details['port_id'])
arp_protect.setup_arp_spoofing_protection(port,
device_details)
if device_details['admin_state_up']:
# create the networking for the port
network_type = device_details.get('network_type')
if network_type:
segmentation_id = device_details.get('segmentation_id')
else:
# compatibility with pre-Havana RPC vlan_id encoding
vlan_id = device_details.get('vlan_id')
(network_type,
segmentation_id) = lconst.interpret_vlan_id(vlan_id)
if self.br_mgr.add_interface(
device_details['network_id'],
network_type,
device_details['physical_network'],
segmentation_id,
device_details['port_id']):
# update plugin about port status
self.plugin_rpc.update_device_up(self.context,
device,
self.agent_id,
cfg.CONF.host)
else:
self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
cfg.CONF.host)
else:
self.remove_port_binding(device_details['network_id'],
device_details['port_id'])
else:
LOG.info(_LI("Device %s not defined on plugin"), device)
return False
def treat_devices_removed(self, devices):
resync = False
self.sg_agent.remove_devices_filter(devices)
for device in devices:
LOG.info(_LI("Attachment %s removed"), device)
details = None
try:
details = self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
cfg.CONF.host)
except Exception as e:
LOG.debug("port_removed failed for %(device)s: %(e)s",
{'device': device, 'e': e})
resync = True
if details and details['exists']:
LOG.info(_LI("Port %s updated."), device)
else:
LOG.debug("Device %s not defined on plugin", device)
if self.prevent_arp_spoofing:
arp_protect.delete_arp_spoofing_protection(devices)
return resync
def scan_devices(self, previous, sync):
device_info = {}
# Save and reinitialise the set variable that the port_update RPC uses.
# This should be thread-safe as the greenthread should not yield
# between these two statements.
updated_devices = self.updated_devices
self.updated_devices = set()
current_devices = self.br_mgr.get_tap_devices()
device_info['current'] = current_devices
if previous is None:
# This is the first iteration of daemon_loop().
previous = {'added': set(),
'current': set(),
'updated': set(),
'removed': set()}
# clear any orphaned ARP spoofing rules (e.g. interface was
# manually deleted)
if self.prevent_arp_spoofing:
arp_protect.delete_unreferenced_arp_protection(current_devices)
if sync:
# This is the first iteration, or the previous one had a problem.
# Re-add all existing devices.
device_info['added'] = current_devices
# Retry cleaning devices that may not have been cleaned properly.
# And clean any that disappeared since the previous iteration.
device_info['removed'] = (previous['removed'] | previous['current']
- current_devices)
# Retry updating devices that may not have been updated properly.
# And any that were updated since the previous iteration.
# Only update devices that currently exist.
device_info['updated'] = (previous['updated'] | updated_devices
& current_devices)
else:
device_info['added'] = current_devices - previous['current']
device_info['removed'] = previous['current'] - current_devices
device_info['updated'] = updated_devices & current_devices
return device_info
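    # Hedged note on the returned structure (example values are hypothetical):
    # device_info maps 'current', 'added', 'updated' and 'removed' to sets of
    # tap device names, e.g. {'current': {'tapf47ac10b-5'}, 'added': set(),
    # 'updated': {'tapf47ac10b-5'}, 'removed': set()}.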
def _device_info_has_changes(self, device_info):
return (device_info.get('added')
or device_info.get('updated')
or device_info.get('removed'))
def daemon_loop(self):
LOG.info(_LI("LinuxBridge Agent RPC Daemon Started!"))
device_info = None
sync = True
while True:
start = time.time()
device_info = self.scan_devices(previous=device_info, sync=sync)
if sync:
LOG.info(_LI("Agent out of sync with plugin!"))
sync = False
if (self._device_info_has_changes(device_info)
or self.sg_agent.firewall_refresh_needed()):
LOG.debug("Agent loop found changes! %s", device_info)
try:
sync = self.process_network_devices(device_info)
except Exception:
LOG.exception(_LE("Error in agent loop. Devices info: %s"),
device_info)
sync = True
# sleep till end of polling interval
elapsed = (time.time() - start)
if (elapsed < self.polling_interval):
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!",
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
def set_rpc_timeout(self, timeout):
for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc,
self.state_rpc):
rpc_api.client.timeout = timeout
def main():
common_config.init(sys.argv[1:])
common_config.setup_logging()
try:
interface_mappings = n_utils.parse_mappings(
cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
except ValueError as e:
LOG.error(_LE("Parsing physical_interface_mappings failed: %s. "
"Agent terminated!"), e)
sys.exit(1)
LOG.info(_LI("Interface mappings: %s"), interface_mappings)
polling_interval = cfg.CONF.AGENT.polling_interval
quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout
agent = LinuxBridgeNeutronAgentRPC(interface_mappings,
polling_interval,
quitting_rpc_timeout)
LOG.info(_LI("Agent initialized successfully, now running... "))
launcher = service.launch(cfg.CONF, agent)
launcher.wait()
if __name__ == "__main__":
main()
| apache-2.0 | -1,381,419,549,410,301,700 | 42.230769 | 79 | 0.552578 | false | 4.317002 | false | false | false |
pyrochlore/cycles | src/blender/addon/__init__.py | 1 | 3388 | #
# Copyright 2011-2013 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
# <pep8 compliant>
bl_info = {
"name": "Cycles Render Engine",
"author": "",
"blender": (2, 70, 0),
"location": "Info header, render engine menu",
"description": "Cycles Render Engine integration",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Doc:2.6/Manual/Render/Cycles",
"tracker_url": "",
"support": 'OFFICIAL',
"category": "Render"}
import bpy
from . import engine
from . import version_update
class CyclesRender(bpy.types.RenderEngine):
bl_idname = 'CYCLES'
bl_label = "Cycles Render"
bl_use_shading_nodes = True
bl_use_preview = True
bl_use_exclude_layers = True
bl_use_save_buffers = True
def __init__(self):
self.session = None
def __del__(self):
engine.free(self)
# final render
def update(self, data, scene):
if self.is_preview:
if not self.session:
cscene = bpy.context.scene.cycles
use_osl = cscene.shading_system and cscene.device == 'CPU'
engine.create(self, data, scene,
None, None, None, use_osl)
else:
if not self.session:
engine.create(self, data, scene)
else:
engine.reset(self, data, scene)
engine.update(self, data, scene)
def render(self, scene):
engine.render(self)
def bake(self, scene, obj, pass_type, pixel_array, num_pixels, depth, result):
engine.bake(self, obj, pass_type, pixel_array, num_pixels, depth, result)
# viewport render
def view_update(self, context):
if not self.session:
engine.create(self, context.blend_data, context.scene,
context.region, context.space_data, context.region_data)
engine.update(self, context.blend_data, context.scene)
def view_draw(self, context):
engine.draw(self, context.region, context.space_data, context.region_data)
def update_script_node(self, node):
if engine.with_osl():
from . import osl
osl.update_script_node(node, self.report)
else:
self.report({'ERROR'}, "OSL support disabled in this build.")
def register():
from . import ui
from . import properties
from . import presets
engine.init()
properties.register()
ui.register()
presets.register()
bpy.utils.register_module(__name__)
bpy.app.handlers.version_update.append(version_update.do_versions)
def unregister():
from . import ui
from . import properties
from . import presets
bpy.app.handlers.version_update.remove(version_update.do_versions)
ui.unregister()
properties.unregister()
presets.unregister()
bpy.utils.unregister_module(__name__)
| apache-2.0 | -5,977,137,211,679,149,000 | 27.957265 | 82 | 0.634002 | false | 3.789709 | false | false | false |
leppa/home-assistant | homeassistant/components/minio/__init__.py | 1 | 8056 | """Minio component."""
import logging
import os
from queue import Queue
import threading
from typing import List
import voluptuous as vol
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
import homeassistant.helpers.config_validation as cv
from .minio_helper import MinioEventThread, create_minio_client
_LOGGER = logging.getLogger(__name__)
DOMAIN = "minio"
CONF_HOST = "host"
CONF_PORT = "port"
CONF_ACCESS_KEY = "access_key"
CONF_SECRET_KEY = "secret_key"
CONF_SECURE = "secure"
CONF_LISTEN = "listen"
CONF_LISTEN_BUCKET = "bucket"
CONF_LISTEN_PREFIX = "prefix"
CONF_LISTEN_SUFFIX = "suffix"
CONF_LISTEN_EVENTS = "events"
ATTR_BUCKET = "bucket"
ATTR_KEY = "key"
ATTR_FILE_PATH = "file_path"
DEFAULT_LISTEN_PREFIX = ""
DEFAULT_LISTEN_SUFFIX = ".*"
DEFAULT_LISTEN_EVENTS = "s3:ObjectCreated:*"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Required(CONF_ACCESS_KEY): cv.string,
vol.Required(CONF_SECRET_KEY): cv.string,
vol.Required(CONF_SECURE): cv.boolean,
vol.Optional(CONF_LISTEN, default=[]): vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_LISTEN_BUCKET): cv.string,
vol.Optional(
CONF_LISTEN_PREFIX, default=DEFAULT_LISTEN_PREFIX
): cv.string,
vol.Optional(
CONF_LISTEN_SUFFIX, default=DEFAULT_LISTEN_SUFFIX
): cv.string,
vol.Optional(
CONF_LISTEN_EVENTS, default=DEFAULT_LISTEN_EVENTS
): cv.string,
}
)
],
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
BUCKET_KEY_SCHEMA = vol.Schema(
{vol.Required(ATTR_BUCKET): cv.template, vol.Required(ATTR_KEY): cv.template}
)
BUCKET_KEY_FILE_SCHEMA = BUCKET_KEY_SCHEMA.extend(
{vol.Required(ATTR_FILE_PATH): cv.template}
)
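# Hedged configuration sketch matching CONFIG_SCHEMA above (host, credentials
# and the listen entry are hypothetical examples, not defaults):
#
#   minio:
#     host: localhost
#     port: 9000
#     access_key: ABC123
#     secret_key: SECRET
#     secure: false
#     listen:
#       - bucket: camera-snapshots
#         prefix: front-door/
#         suffix: .jpg
#         events: "s3:ObjectCreated:*"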
def setup(hass, config):
"""Set up MinioClient and event listeners."""
conf = config[DOMAIN]
host = conf[CONF_HOST]
port = conf[CONF_PORT]
access_key = conf[CONF_ACCESS_KEY]
secret_key = conf[CONF_SECRET_KEY]
secure = conf[CONF_SECURE]
queue_listener = QueueListener(hass)
queue = queue_listener.queue
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, queue_listener.start_handler)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, queue_listener.stop_handler)
def _setup_listener(listener_conf):
bucket = listener_conf[CONF_LISTEN_BUCKET]
prefix = listener_conf[CONF_LISTEN_PREFIX]
suffix = listener_conf[CONF_LISTEN_SUFFIX]
events = listener_conf[CONF_LISTEN_EVENTS]
minio_listener = MinioListener(
queue,
get_minio_endpoint(host, port),
access_key,
secret_key,
secure,
bucket,
prefix,
suffix,
events,
)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, minio_listener.start_handler)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, minio_listener.stop_handler)
for listen_conf in conf[CONF_LISTEN]:
_setup_listener(listen_conf)
minio_client = create_minio_client(
get_minio_endpoint(host, port), access_key, secret_key, secure
)
def _render_service_value(service, key):
value = service.data[key]
value.hass = hass
return value.async_render()
def put_file(service):
"""Upload file service."""
bucket = _render_service_value(service, ATTR_BUCKET)
key = _render_service_value(service, ATTR_KEY)
file_path = _render_service_value(service, ATTR_FILE_PATH)
if not hass.config.is_allowed_path(file_path):
_LOGGER.error("Invalid file_path %s", file_path)
return
minio_client.fput_object(bucket, key, file_path)
def get_file(service):
"""Download file service."""
bucket = _render_service_value(service, ATTR_BUCKET)
key = _render_service_value(service, ATTR_KEY)
file_path = _render_service_value(service, ATTR_FILE_PATH)
if not hass.config.is_allowed_path(file_path):
_LOGGER.error("Invalid file_path %s", file_path)
return
minio_client.fget_object(bucket, key, file_path)
def remove_file(service):
"""Delete file service."""
bucket = _render_service_value(service, ATTR_BUCKET)
key = _render_service_value(service, ATTR_KEY)
minio_client.remove_object(bucket, key)
hass.services.register(DOMAIN, "put", put_file, schema=BUCKET_KEY_FILE_SCHEMA)
hass.services.register(DOMAIN, "get", get_file, schema=BUCKET_KEY_FILE_SCHEMA)
hass.services.register(DOMAIN, "remove", remove_file, schema=BUCKET_KEY_SCHEMA)
return True
def get_minio_endpoint(host: str, port: int) -> str:
"""Create minio endpoint from host and port."""
return f"{host}:{port}"
class QueueListener(threading.Thread):
"""Forward events from queue into HASS event bus."""
def __init__(self, hass):
"""Create queue."""
super().__init__()
self._hass = hass
self._queue = Queue()
def run(self):
"""Listen to queue events, and forward them to HASS event bus."""
_LOGGER.info("Running QueueListener")
while True:
event = self._queue.get()
if event is None:
break
_, file_name = os.path.split(event[ATTR_KEY])
_LOGGER.debug(
"Sending event %s, %s, %s",
event["event_name"],
event[ATTR_BUCKET],
event[ATTR_KEY],
)
self._hass.bus.fire(DOMAIN, {"file_name": file_name, **event})
@property
def queue(self):
"""Return wrapped queue."""
return self._queue
def stop(self):
"""Stop run by putting None into queue and join the thread."""
_LOGGER.info("Stopping QueueListener")
self._queue.put(None)
self.join()
_LOGGER.info("Stopped QueueListener")
def start_handler(self, _):
"""Start handler helper method."""
self.start()
def stop_handler(self, _):
"""Stop handler helper method."""
self.stop()
class MinioListener:
"""MinioEventThread wrapper with helper methods."""
def __init__(
self,
queue: Queue,
endpoint: str,
access_key: str,
secret_key: str,
secure: bool,
bucket_name: str,
prefix: str,
suffix: str,
events: List[str],
):
"""Create Listener."""
self._queue = queue
self._endpoint = endpoint
self._access_key = access_key
self._secret_key = secret_key
self._secure = secure
self._bucket_name = bucket_name
self._prefix = prefix
self._suffix = suffix
self._events = events
self._minio_event_thread = None
def start_handler(self, _):
"""Create and start the event thread."""
self._minio_event_thread = MinioEventThread(
self._queue,
self._endpoint,
self._access_key,
self._secret_key,
self._secure,
self._bucket_name,
self._prefix,
self._suffix,
self._events,
)
self._minio_event_thread.start()
def stop_handler(self, _):
"""Issue stop and wait for thread to join."""
if self._minio_event_thread is not None:
self._minio_event_thread.stop()
| apache-2.0 | 4,996,086,234,179,014,000 | 29.4 | 85 | 0.566783 | false | 3.925926 | true | false | false |
matejsuchanek/pywikibot-scripts | fix_qualifiers.py | 1 | 4360 | #!/usr/bin/python
"""This script is obsolete!"""
import pywikibot
from pywikibot import pagegenerators
from .query_store import QueryStore
from .wikidata import WikidataEntityBot
class QualifiersFixingBot(WikidataEntityBot):
blacklist = frozenset(['P143', 'P248', 'P459', 'P518', 'P577', 'P805',
'P972', 'P1065', 'P1135', 'P1480', 'P1545', 'P1932',
'P2315', 'P2701', 'P3274', ])
whitelist = frozenset(['P17', 'P21', 'P39', 'P155', 'P156', 'P281', 'P580',
'P582', 'P585', 'P669', 'P708', 'P969', 'P1355',
'P1356', ])
good_item = 'Q15720608'
use_from_page = False
def __init__(self, **kwargs):
kwargs.update({
'bad_cache': kwargs.get('bad_cache', []) + list(self.blacklist),
'good_cache': kwargs.get('good_cache', []) + list(self.whitelist),
})
super().__init__(**kwargs)
self.store = QueryStore()
def filterProperty(self, prop_page):
if prop_page.type == 'external-id':
return False
prop_page.get()
if 'P31' not in prop_page.claims:
pywikibot.warning('%s is not classified' % prop_page.getID())
return False
for claim in prop_page.claims['P31']:
if claim.target_equals(self.good_item):
return True
return False
@property
def generator(self):
query = self.store.build_query(
'qualifiers', item=self.good_item,
good=', wd:'.join(self.whitelist),
bad=', wd:'.join(self.blacklist))
return pagegenerators.PreloadingItemGenerator(
pagegenerators.WikidataSPARQLPageGenerator(query, site=self.repo))
def treat_page_and_item(self, page, item):
for prop in item.claims.keys():
for claim in item.claims[prop]:
moved = set()
json = claim.toJSON()
i = -1
for source in claim.sources:
i += 1
for ref_prop in filter(self.checkProperty, source.keys()):
for snak in source[ref_prop]:
json.setdefault('qualifiers', {}).setdefault(ref_prop, [])
for qual in (pywikibot.Claim.qualifierFromJSON(self.repo, q)
for q in json['qualifiers'][ref_prop]):
if qual.target_equals(snak.getTarget()):
break
else:
snak.isReference = False
snak.isQualifier = True
json['qualifiers'][ref_prop].append(snak.toJSON())
json['references'][i]['snaks'][ref_prop].pop(0)
if len(json['references'][i]['snaks'][ref_prop]) == 0:
json['references'][i]['snaks'].pop(ref_prop)
if len(json['references'][i]['snaks']) == 0:
json['references'].pop(i)
i -= 1
moved.add(ref_prop)
if len(moved) > 0:
data = {'claims': [json]}
self.user_edit_entity(item, data, summary=self.makeSummary(prop, moved),
asynchronous=True)
def makeSummary(self, prop, props):
props = ['[[Property:P%s]]' % pid for pid in sorted(
int(pid[1:]) for pid in props)]
return '[[Property:%s]]: moving misplaced reference%s %s to qualifiers' % (
prop, 's' if len(props) > 1 else '', '%s and %s' % (
', '.join(props[:-1]), props[-1]) if len(props) > 1 else props[0])
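    # Example of the summary built above (hedged illustration):
    # makeSummary('P31', {'P580', 'P582'}) returns
    # "[[Property:P31]]: moving misplaced references [[Property:P580]] and
    # [[Property:P582]] to qualifiers".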
def main(*args):
options = {}
for arg in pywikibot.handle_args(args):
if arg.startswith('-'):
arg, sep, value = arg.partition(':')
if value != '':
options[arg[1:]] = value if not value.isdigit() else int(value)
else:
options[arg[1:]] = True
site = pywikibot.Site('wikidata', 'wikidata')
bot = QualifiersFixingBot(site=site, **options)
bot.run()
if __name__ == '__main__':
main()
| gpl-2.0 | 4,387,339,838,632,818,700 | 38.636364 | 92 | 0.482569 | false | 4.052045 | false | false | false |
mediatum/mediatum | workflow/addtofolder.py | 1 | 4475 | # -*- coding: utf-8 -*-
"""
mediatum - a multimedia content repository
Copyright (C) 2011 Arne Seifert <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from .upload import WorkflowStep
from .workflow import registerStep
from core.translation import t, addLabels
from utils.utils import isNumeric
from core import Node
from core import db
import json
from schema.schema import Metafield
from contenttypes.container import Directory
q = db.query
def register():
registerStep("workflowstep_addtofolder")
addLabels(WorkflowStep_AddToFolder.getLabels())
class WorkflowStep_AddToFolder(WorkflowStep):
"""
    Workflow step that adds the item to a selectable subfolder.
    attributes:
        - destination: list of node ids, separated by ";"
        - subfolder: list of subfolder paths below destination; the item is added to the first
          subfolder that exists, and the remaining subfolders are ignored.
          Subfolders are specified as a JSON string and can contain metadata from the item, like:
["{faculty}/Prüfungsarbeiten/{type}en/", "{faculty}/Prüfungsarbeiten/Weitere Prüfungsarbeiten/"]
"""
def show_workflow_node(self, node, req):
return self.forwardAndShow(node, True, req)
def getFolder(self, node, destNode, subfolder):
"""
search the subfolder below destNode
:param node: node which should be placed in the subfolder, parts of the node attributes may be specified
in subfolder
:param destNode: destination Node under which the subfolder is searched
        :param subfolder: directory path to the subfolder below destNode, like: "{faculty}/Prüfungsarbeiten/{type}en/"
        :return: the node of the subfolder if it is found, otherwise None
"""
subfolderNode = destNode
for subdir in subfolder.format(**node.attrs).split("/"):
if not subdir:
continue
subfolderNode = subfolderNode.children.filter_by(name=subdir).scalar()
if not subfolderNode:
return None
return subfolderNode
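    # Hedged example (attribute values are hypothetical): with
    # node.attrs == {'faculty': 'Informatik', 'type': 'Dissertation'}, the
    # subfolder "{faculty}/Prüfungsarbeiten/{type}en/" expands to
    # "Informatik/Prüfungsarbeiten/Dissertationen/", and getFolder() walks the
    # children of destNode named "Informatik", "Prüfungsarbeiten" and
    # "Dissertationen" in turn, returning None as soon as one of them is missing.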
def runAction(self, node, op=""):
subfolders = json.loads(self.get('destination_subfolder'))
for nid in self.get('destination').split(";"):
if not nid:
continue
destNode = q(Node).get(nid)
if not destNode:
continue
for subfolder in subfolders:
subfolderNode = self.getFolder(node, destNode, subfolder)
if not subfolderNode:
continue
subfolderNode.children.append(node)
db.session.commit()
break
def metaFields(self, lang=None):
ret = []
field = Metafield("destination")
field.set("label", t(lang, "admin_wfstep_addtofolder_destination"))
field.set("type", "treeselect")
ret.append(field)
field = Metafield("destination_subfolder")
field.set("label", t(lang, "admin_wfstep_addtofolder_destination_subfolder"))
field.set("type", "text")
ret.append(field)
return ret
@staticmethod
def getLabels():
return {"de":
[
("workflowstep-addtofolder", "Zu einem Verzeichnis hinzufügen"),
("admin_wfstep_addtofolder_destination", "Zielknoten-ID"),
("admin_wfstep_addtofolder_destination_subfolder", "Unterverzeichnis"),
],
"en":
[
("workflowstep-addtofolder", "add to folder"),
("admin_wfstep_addtofolder_destination", "ID of destination node"),
("admin_wfstep_addtofolder_destination_subfolder", "sub folder"),
]
}
| gpl-3.0 | 92,644,555,041,265,800 | 36.25 | 121 | 0.625056 | false | 4.365234 | false | false | false |
greggian/TapdIn | django/core/handlers/base.py | 1 | 9267 | import sys
from django import http
from django.core import signals
from django.utils.encoding import force_unicode
from django.utils.importlib import import_module
class BaseHandler(object):
# Changes that are always applied to a response (in this order).
response_fixes = [
http.fix_location_header,
http.conditional_content_removal,
http.fix_IE_for_attach,
http.fix_IE_for_vary,
]
def __init__(self):
self._request_middleware = self._view_middleware = self._response_middleware = self._exception_middleware = None
def load_middleware(self):
"""
Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__).
"""
from django.conf import settings
from django.core import exceptions
self._view_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
try:
dot = middleware_path.rindex('.')
except ValueError:
raise exceptions.ImproperlyConfigured, '%s isn\'t a middleware module' % middleware_path
mw_module, mw_classname = middleware_path[:dot], middleware_path[dot+1:]
try:
mod = import_module(mw_module)
except ImportError, e:
raise exceptions.ImproperlyConfigured, 'Error importing middleware %s: "%s"' % (mw_module, e)
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured, 'Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname)
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
continue
if hasattr(mw_instance, 'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
# We only assign to this when initialization is complete as it is used
# as a flag for initialization being complete.
self._request_middleware = request_middleware
def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest"
from django.core import exceptions, urlresolvers
from django.conf import settings
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
return response
# Get urlconf from request object, if available. Otherwise use default.
urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
try:
callback, callback_args, callback_kwargs = resolver.resolve(
request.path_info)
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
return response
try:
response = callback(request, *callback_args, **callback_kwargs)
except Exception, e:
# If the view raised an exception, run it through exception
# middleware, and if the exception middleware returns a
# response, use that. Otherwise, reraise the exception.
for middleware_method in self._exception_middleware:
response = middleware_method(request, e)
if response:
return response
raise
# Complain if the view returned None (a common error).
if response is None:
try:
view_name = callback.func_name # If it's a function
except AttributeError:
view_name = callback.__class__.__name__ + '.__call__' # If it's a class
raise ValueError, "The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name)
return response
except http.Http404, e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
else:
try:
callback, param_dict = resolver.resolve404()
return callback(request, **param_dict)
except:
try:
return self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
except exceptions.PermissionDenied:
return http.HttpResponseForbidden('<h1>Permission denied</h1>')
except SystemExit:
# Allow sys.exit() to actually exit. See tickets #1023 and #4701
raise
except: # Handle everything else, including SuspiciousOperation, etc.
# Get the exception info now, in case another exception is thrown later.
exc_info = sys.exc_info()
receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
return self.handle_uncaught_exception(request, resolver, exc_info)
def handle_uncaught_exception(self, request, resolver, exc_info):
"""
Processing for any otherwise uncaught exceptions (those that will
generate HTTP 500 responses). Can be overridden by subclasses who want
customised 500 handling.
Be *very* careful when overriding this because the error could be
caused by anything, so assuming something like the database is always
available would be an error.
"""
from django.conf import settings
from django.core.mail import mail_admins
if settings.DEBUG_PROPAGATE_EXCEPTIONS:
raise
if settings.DEBUG:
from django.views import debug
return debug.technical_500_response(request, *exc_info)
# When DEBUG is False, send an error message to the admins.
subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)
try:
request_repr = repr(request)
except:
request_repr = "Request repr() unavailable"
message = "%s\n\n%s" % (self._get_traceback(exc_info), request_repr)
mail_admins(subject, message, fail_silently=True)
# Return an HttpResponse that displays a friendly error message.
callback, param_dict = resolver.resolve500()
return callback(request, **param_dict)
def _get_traceback(self, exc_info=None):
"Helper function to return the traceback as a string"
import traceback
return '\n'.join(traceback.format_exception(*(exc_info or sys.exc_info())))
def apply_response_fixes(self, request, response):
"""
Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.
"""
for func in self.response_fixes:
response = func(request, response)
return response
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless DJANGO_USE_POST_REWRITE is set (to
anything).
"""
from django.conf import settings
if settings.FORCE_SCRIPT_NAME is not None:
return force_unicode(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every webserver (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = environ.get('SCRIPT_URL', u'')
if not script_url:
script_url = environ.get('REDIRECT_URL', u'')
if script_url:
return force_unicode(script_url[:-len(environ.get('PATH_INFO', ''))])
return force_unicode(environ.get('SCRIPT_NAME', u''))
| apache-2.0 | -8,968,178,709,805,447,000 | 42.985437 | 143 | 0.604726 | false | 4.811526 | false | false | false |
luksan/kodos | modules/urlDialog.py | 1 | 1874 | # -*- coding: utf-8; mode: python; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; truncate-lines: 0 -*-
# vi: set fileencoding=utf-8 filetype=python expandtab tabstop=4 shiftwidth=4 softtabstop=4 cindent:
# :mode=python:indentSize=4:tabSize=4:noTabs=true:
#-----------------------------------------------------------------------------#
# Built-in modules
import urllib
#-----------------------------------------------------------------------------#
# Installed modules
from PyQt4 import QtGui, QtCore
#-----------------------------------------------------------------------------#
# Kodos modules
from .urlDialogBA import Ui_URLDialogBA
from . import help
#-----------------------------------------------------------------------------#
class URLDialog(QtGui.QDialog, Ui_URLDialogBA):
urlImported = QtCore.pyqtSignal(str, str)
def __init__(self, url=None, parent=None, f=QtCore.Qt.WindowFlags()):
QtGui.QDialog.__init__(self, parent, f)
self.setupUi(self)
if url:
self.URLTextEdit.setPlainText(url)
self.show()
return
def help_slot(self):
self.helpWindow = help.Help(self, "importURL.html")
return
def ok_slot(self):
url = str(self.URLTextEdit.toPlainText())
try:
fp = urllib.urlopen(url)
lines = fp.readlines()
except Exception as e:
QtGui.QMessageBox.information(
None,
"Failed to open URL",
"Could not open the specified URL. Please check to ensure \
that you have entered the correct URL.\n\n{0}".format(str(e))
)
return
html = ''.join(lines)
self.urlImported.emit(html, url)
self.accept()
return
#-----------------------------------------------------------------------------#
| gpl-2.0 | -2,844,676,749,710,994,000 | 28.28125 | 112 | 0.47492 | false | 4.419811 | false | false | false |
luzhuomi/collamine-client-python | scrapybot/scrapybot/spiders/hwz.py | 1 | 1627 | from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from scrapybot.items import ScrapybotItem
from scrapybot.utils import normalizeFriendlyDate
import datetime
from dateutil.parser import parse
from django.utils import timezone
import django.db.utils
class HwzSpider(CrawlSpider):
name = "hwz"
allowed_domains = ["hardwarezone.com.sg"]
domain = "www.hardwarezone.com" # for collamine upload
start_urls = [
"http://forums.hardwarezone.com.sg/current-affairs-lounge-17/",
"http://forums.hardwarezone.com.sg/money-mind-210/"
]
rules = (
Rule(SgmlLinkExtractor(allow=('current\-affairs\-lounge\-17/.*\.html', )), callback='parse_item', follow=True),
Rule(SgmlLinkExtractor(allow=('money\-mind\-210/.*\.html', )), callback='parse_item', follow=True),
)
"""
When writing crawl spider rules, avoid using parse as callback, since the CrawlSpider uses the parse method itself to implement its logic. So if you override the parse method, the crawl spider will no longer work.
"""
def parse_item(self, response):
source="original"
if ((response.flags) and ("collamine" in response.flags)):
source="collamine"
i = ScrapybotItem(url=response.url,
domain=self.allowed_domains[0],
source=source,
content=response.body.decode(response.encoding),
crawled_date=timezone.now())
try:
i.save()
except django.db.utils.IntegrityError:
print "url exists"
| apache-2.0 | -3,216,291,927,608,126,000 | 35.977273 | 217 | 0.690227 | false | 3.70615 | false | false | false |
oudalab/phyllo | phyllo/extractors/germanicusDB.py | 1 | 3002 | import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
def main():
# The collection URL below.
collURL = 'http://www.thelatinlibrary.com/germanicus.html'
collOpen = urllib.request.urlopen(collURL)
collSOUP = BeautifulSoup(collOpen, 'html5lib')
author = collSOUP.title.string.split(":")[0].strip()
colltitle = collSOUP.p.string.split(":")[0].strip()
date = "no date found"
textsURL = [collURL]
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE author = 'Germanicus'")
for url in textsURL:
chapter = "-1"
verse = 0
title = collSOUP.title.string.split(":")[1].strip()
openurl = urllib.request.urlopen(url)
textsoup = BeautifulSoup(openurl, 'html5lib')
getp = textsoup.find_all('p')
for p in getp:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
                                                 'internal_navigation']:  # these are not part of the main text
continue
except:
pass
txtstr = p.get_text().strip()
if txtstr.startswith("The"):
continue
brtags = p.findAll('br')
verses = []
try:
firstline = brtags[0].previous_sibling.previous_sibling.strip()
except:
firstline = brtags[0].previous_sibling.strip()
verses.append(firstline)
for br in brtags:
try:
text = br.next_sibling.next_sibling.strip()
except:
text = br.next_sibling.strip()
if text is None or text == '' or text.isspace():
continue
# remove in-text line numbers
                if re.search(r'[0-9]+$', text):
                    try:
                        text = re.sub(r'[0-9]+$', '', text).strip()
except:
pass
verses.append(text)
for v in verses:
# verse number assignment.
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v, url, 'poetry'))
if __name__ == '__main__':
main()
| apache-2.0 | -921,874,890,846,832,400 | 34.738095 | 111 | 0.476682 | false | 4.494012 | false | false | false |
kubernetes-client/python | kubernetes/client/models/v1_job_spec.py | 1 | 13937 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1JobSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'active_deadline_seconds': 'int',
'backoff_limit': 'int',
'completions': 'int',
'manual_selector': 'bool',
'parallelism': 'int',
'selector': 'V1LabelSelector',
'template': 'V1PodTemplateSpec',
'ttl_seconds_after_finished': 'int'
}
attribute_map = {
'active_deadline_seconds': 'activeDeadlineSeconds',
'backoff_limit': 'backoffLimit',
'completions': 'completions',
'manual_selector': 'manualSelector',
'parallelism': 'parallelism',
'selector': 'selector',
'template': 'template',
'ttl_seconds_after_finished': 'ttlSecondsAfterFinished'
}
def __init__(self, active_deadline_seconds=None, backoff_limit=None, completions=None, manual_selector=None, parallelism=None, selector=None, template=None, ttl_seconds_after_finished=None, local_vars_configuration=None): # noqa: E501
"""V1JobSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._active_deadline_seconds = None
self._backoff_limit = None
self._completions = None
self._manual_selector = None
self._parallelism = None
self._selector = None
self._template = None
self._ttl_seconds_after_finished = None
self.discriminator = None
if active_deadline_seconds is not None:
self.active_deadline_seconds = active_deadline_seconds
if backoff_limit is not None:
self.backoff_limit = backoff_limit
if completions is not None:
self.completions = completions
if manual_selector is not None:
self.manual_selector = manual_selector
if parallelism is not None:
self.parallelism = parallelism
if selector is not None:
self.selector = selector
self.template = template
if ttl_seconds_after_finished is not None:
self.ttl_seconds_after_finished = ttl_seconds_after_finished
@property
def active_deadline_seconds(self):
"""Gets the active_deadline_seconds of this V1JobSpec. # noqa: E501
Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer # noqa: E501
:return: The active_deadline_seconds of this V1JobSpec. # noqa: E501
:rtype: int
"""
return self._active_deadline_seconds
@active_deadline_seconds.setter
def active_deadline_seconds(self, active_deadline_seconds):
"""Sets the active_deadline_seconds of this V1JobSpec.
Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer # noqa: E501
:param active_deadline_seconds: The active_deadline_seconds of this V1JobSpec. # noqa: E501
:type: int
"""
self._active_deadline_seconds = active_deadline_seconds
@property
def backoff_limit(self):
"""Gets the backoff_limit of this V1JobSpec. # noqa: E501
Specifies the number of retries before marking this job failed. Defaults to 6 # noqa: E501
:return: The backoff_limit of this V1JobSpec. # noqa: E501
:rtype: int
"""
return self._backoff_limit
@backoff_limit.setter
def backoff_limit(self, backoff_limit):
"""Sets the backoff_limit of this V1JobSpec.
Specifies the number of retries before marking this job failed. Defaults to 6 # noqa: E501
:param backoff_limit: The backoff_limit of this V1JobSpec. # noqa: E501
:type: int
"""
self._backoff_limit = backoff_limit
@property
def completions(self):
"""Gets the completions of this V1JobSpec. # noqa: E501
Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
:return: The completions of this V1JobSpec. # noqa: E501
:rtype: int
"""
return self._completions
@completions.setter
def completions(self, completions):
"""Sets the completions of this V1JobSpec.
Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
:param completions: The completions of this V1JobSpec. # noqa: E501
:type: int
"""
self._completions = completions
@property
def manual_selector(self):
"""Gets the manual_selector of this V1JobSpec. # noqa: E501
manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector # noqa: E501
:return: The manual_selector of this V1JobSpec. # noqa: E501
:rtype: bool
"""
return self._manual_selector
@manual_selector.setter
def manual_selector(self, manual_selector):
"""Sets the manual_selector of this V1JobSpec.
manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector # noqa: E501
:param manual_selector: The manual_selector of this V1JobSpec. # noqa: E501
:type: bool
"""
self._manual_selector = manual_selector
@property
def parallelism(self):
"""Gets the parallelism of this V1JobSpec. # noqa: E501
Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
:return: The parallelism of this V1JobSpec. # noqa: E501
:rtype: int
"""
return self._parallelism
@parallelism.setter
def parallelism(self, parallelism):
"""Sets the parallelism of this V1JobSpec.
Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
:param parallelism: The parallelism of this V1JobSpec. # noqa: E501
:type: int
"""
self._parallelism = parallelism
@property
def selector(self):
"""Gets the selector of this V1JobSpec. # noqa: E501
:return: The selector of this V1JobSpec. # noqa: E501
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1JobSpec.
:param selector: The selector of this V1JobSpec. # noqa: E501
:type: V1LabelSelector
"""
self._selector = selector
@property
def template(self):
"""Gets the template of this V1JobSpec. # noqa: E501
:return: The template of this V1JobSpec. # noqa: E501
:rtype: V1PodTemplateSpec
"""
return self._template
@template.setter
def template(self, template):
"""Sets the template of this V1JobSpec.
:param template: The template of this V1JobSpec. # noqa: E501
:type: V1PodTemplateSpec
"""
if self.local_vars_configuration.client_side_validation and template is None: # noqa: E501
raise ValueError("Invalid value for `template`, must not be `None`") # noqa: E501
self._template = template
@property
def ttl_seconds_after_finished(self):
"""Gets the ttl_seconds_after_finished of this V1JobSpec. # noqa: E501
ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature. # noqa: E501
:return: The ttl_seconds_after_finished of this V1JobSpec. # noqa: E501
:rtype: int
"""
return self._ttl_seconds_after_finished
@ttl_seconds_after_finished.setter
def ttl_seconds_after_finished(self, ttl_seconds_after_finished):
"""Sets the ttl_seconds_after_finished of this V1JobSpec.
ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature. # noqa: E501
:param ttl_seconds_after_finished: The ttl_seconds_after_finished of this V1JobSpec. # noqa: E501
:type: int
"""
self._ttl_seconds_after_finished = ttl_seconds_after_finished
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1JobSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1JobSpec):
return True
return self.to_dict() != other.to_dict()
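# Usage sketch (not part of the generated client): building and serializing a
# job spec.  The V1PodTemplateSpec instance would normally be fully populated
# with a pod spec before use.
#
#   template = V1PodTemplateSpec()
#   spec = V1JobSpec(template=template, completions=1, backoff_limit=4,
#                    parallelism=2)
#   spec.to_dict()   # plain-dict form used when serializing the object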
| apache-2.0 | 6,172,444,438,612,771,000 | 43.244444 | 685 | 0.664705 | false | 4.175255 | true | false | false |
andrewjpage/gubbins | python/scripts/gubbins_drawer.py | 1 | 26242 | #!/usr/bin/env python3
#################################
# Import some necessary modules #
#################################
import argparse
import pkg_resources
from Bio.Nexus import Trees, Nodes
from Bio.Graphics.GenomeDiagram._Colors import ColorTranslator
from Bio.GenBank import Scanner
from Bio.GenBank import _FeatureConsumer
from Bio.GenBank.utils import FeatureValueCleaner
from reportlab.lib.units import inch
from reportlab.lib import pagesizes
from reportlab.graphics.shapes import *
from reportlab.pdfgen.canvas import Canvas
from reportlab.graphics import renderPDF
################################
# Get the command line options #
################################
def main():
parser = argparse.ArgumentParser(description='Gubbins Drawer creates a PDF with a tree on one side and the recombination regions plotted on the reference space on the other side. An interactive version can be found at https://sanger-pathogens.github.io/phandango/', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('tree', help='Tree in Newick format, such as XXXX.final_tree.tre')
parser.add_argument('embl_file', help='EMBL file, such as XXXX.recombination_predictions.embl')
parser.add_argument( '-o', '--outputfile', help='Output PDF filename', default = 'gubbins_recombinations.pdf')
return parser.parse_args()
##########################################################
# Function to read an alignment whichever format it's in #
##########################################################
def tab_parser(handle, quiet=False):
def Drawer_parse_tab_features(object, skip=False):
features = []
line = object.line
while True:
if not line:
break
raise ValueError("Premature end of line during features table")
if line[:object.HEADER_WIDTH].rstrip() in object.SEQUENCE_HEADERS:
if object.debug : print("Found start of sequence")
break
line = line.rstrip()
if line == "//":
raise ValueError("Premature end of features table, marker '//' found")
if line in object.FEATURE_END_MARKERS:
if object.debug : print("Found end of features")
line = object.handle.readline()
break
if line[2:object.FEATURE_QUALIFIER_INDENT].strip() == "":
print(line[2:object.FEATURE_QUALIFIER_INDENT].strip())
raise ValueError("Expected a feature qualifier in line '%s'" % line)
if skip:
line = object.handle.readline()
while line[:object.FEATURE_QUALIFIER_INDENT] == object.FEATURE_QUALIFIER_SPACER:
line = object.handle.readline()
else:
#Build up a list of the lines making up this feature:
feature_key = line[2:object.FEATURE_QUALIFIER_INDENT].strip()
feature_lines = [line[object.FEATURE_QUALIFIER_INDENT:]]
line = object.handle.readline()
while line[:object.FEATURE_QUALIFIER_INDENT] == object.FEATURE_QUALIFIER_SPACER or line.rstrip() == "" : # cope with blank lines in the midst of a feature
feature_lines.append(line[object.FEATURE_QUALIFIER_INDENT:].rstrip())
line = object.handle.readline()
if len(line)==0:
break#EOF
feature_lines.append('/seq="N"')
sys.stdout.flush()
features.append(object.parse_feature(feature_key, feature_lines))
object.line = line
return features
def Drawer_feed(object, handle, consumer, do_features=True):
if do_features:
object._feed_feature_table(consumer, Drawer_parse_tab_features(object,skip=False))
else:
Drawer_parse_tab_features(object,skip=True) # ignore the data
sequence_string="N"
consumer.sequence(sequence_string)
consumer.record_end("//")
length=0
for record in consumer.data.features:
if record.location.nofuzzy_end>length:
length=record.location.nofuzzy_end
consumer.data.seq="N"*length
return True
myscanner=Scanner.InsdcScanner()
myscanner.set_handle(handle)
myscanner.line=myscanner.handle.readline()
myscanner.FEATURE_QUALIFIER_INDENT=21
myscanner.FEATURE_QUALIFIER_SPACER = "FT" + " " * (myscanner.FEATURE_QUALIFIER_INDENT-2)
myscanner.debug=True
consumer = _FeatureConsumer(use_fuzziness = 1, feature_cleaner = FeatureValueCleaner())
Drawer_feed(myscanner, handle, consumer)
return consumer.data
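# Illustrative call (the filename is hypothetical): parse a Gubbins
# recombination EMBL/tab file into a record object whose .features carry the
# predicted recombination blocks.
#
#   record = tab_parser(open("XXXX.recombination_predictions.embl", "r"))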
####################################################
# Function to round floats to n significant digits #
####################################################
def round_to_n(x, n):
if n < 1:
raise ValueError("number of significant digits must be >= 1")
# Use %e format to get the n most significant digits, as a string.
format = "%." + str(n-1) + "e"
as_string = format % x
if x>=10 or x<=-10:
return int(float(as_string))
else:
return float(as_string)
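# A couple of illustrative values (n is the number of significant digits kept;
# results with magnitude >= 10 are returned as ints):
#   round_to_n(123456.0, 2) -> 120000
#   round_to_n(0.04567, 2)  -> 0.046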
##############################################################################################################
# Function to convert features with subfeatures (e.g. pseudogenes) to a list of locations of the subfeatures #
##############################################################################################################
def iterate_subfeatures(feature, locations):
if len(feature.sub_features)>0:
for subfeature in feature.sub_features:
locations=iterate_subfeatures(subfeature, locations)
else:
locations.append((feature.location.start.position, feature.location.end.position))
return locations
####################################################
# Function to get the pixel width of a text string #
####################################################
def get_text_width(font, size, text):
    # The canvas is only used to measure the rendered string width and is
    # never saved to disk, so a placeholder filename is sufficient here.
    c = Canvas("text_width_scratch.pdf", pagesize=pagesize)
    length = c.stringWidth(str(text), font, size)
    return length
#####################################################################################
# Function to add an embl file to multiple tracks split using the qualifiers option #
#####################################################################################
def add_ordered_embl_to_diagram(record, incfeatures=["CDS", "feature"], emblfile=True):
incfeatures= [x.lower() for x in incfeatures]
new_tracks={}
print(len(record.features), "features found for", record.name)
if len(record.seq)>500000:
scale_largetick_interval=int(round((len(record.seq)/10),-5))
scale_smalltick_interval=int(round((len(record.seq)/10),-5)/5)
else:
scale_largetick_interval=len(record.seq)
scale_smalltick_interval=len(record.seq)/5
for x, feature in enumerate(record.features):
if feature.type.lower() not in incfeatures or feature.location.nofuzzy_end<0 or (feature.location.nofuzzy_start>-1 and -1!=-1):
continue
if "colour" in feature.qualifiers:
colourline=feature.qualifiers["colour"][0]
elif "color" in feature.qualifiers:
colourline=feature.qualifiers["color"][0]
else:
colourline = "5"
if len(colourline.split())==1:
colour=translator.artemis_color(colourline)
elif len(colourline.split())==3:
colour=translator.int255_color((int(colourline.split()[0]),int(colourline.split()[1]),int(colourline.split()[2])))
else:
print("Can't understand colour code!")
print(colourline)
sys.exit()
locations=[]
locations.append((feature.location.start, feature.location.end))
if "taxa" in feature.qualifiers:
qualifiernames=feature.qualifiers["taxa"][0].replace(", "," ").split()
for taxonname in qualifiernames:
taxonname=taxonname.strip()
if not taxonname in new_tracks:
newtrack = Track()
newtrack.name=taxonname
new_tracks[taxonname]=newtrack
arrows=0
new_tracks[taxonname].add_feature(locations, fillcolour=colour, strokecolour=colour, arrows=arrows)
else:
if not record.name in new_tracks:
newtrack = Track()
newtrack.name=record.name
new_tracks[record.name]=newtrack
arrows=0
new_tracks[record.name].add_feature(locations, fillcolour=colour, strokecolour=colour, arrows=arrows)
if len(new_tracks)>1 and record.name in new_tracks:
del new_tracks[record.name]
return new_tracks
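# The features consumed above are expected to carry Artemis-style qualifiers,
# e.g. (illustrative EMBL fragment, column alignment abbreviated):
#
#   FT   misc_feature    1200..3400
#   FT                   /colour=2
#   FT                   /taxa="taxon_1 taxon_2"
#
# /colour (or /color) selects the fill colour, and /taxa assigns the block to
# one track per listed taxon.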
###################################################################################
# Function to add a tab file to multiple tracks split using the qualifiers option #
###################################################################################
def add_ordered_tab_to_diagram(filename):
features={"":[]}
featurename=""
names_to_add_feature_to=[]
try:
record=tab_parser(open(filename,"r"))
except IOError:
print("Cannot find file", filename)
sys.exit()
record.name=filename
new_tracks=add_ordered_embl_to_diagram(record, incfeatures=["i", "d", "li", "del", "snp", "misc_feature", "core", "cds", "insertion", "deletion", "recombination", "feature", "blastn_hit", "fasta_record", "variation"], emblfile=False)
return new_tracks
def add_empty_track(existing_tracks, track_name):
newtrack = Track()
newtrack.name=track_name
newtrack.beginning=0
newtrack.track_height=1
existing_tracks[track_name] = newtrack
existing_tracks[track_name].add_feature(locations=[(0,0)], fillcolour=translator.artemis_color(2), strokecolour=translator.artemis_color(2), arrows=0)
return existing_tracks
#############################
# Function to draw the tree #
#############################
def drawtree(treeObject, treeheight, treewidth, xoffset, yoffset, name_offset=5):
def get_max_branch_depth():
terminals=treeObject.get_terminals()
maxbrlen=0.0
for terminal in terminals:
if treeObject.sum_branchlength(node=terminal)>maxbrlen:
maxbrlen=treeObject.sum_branchlength(node=terminal)
return maxbrlen
def draw_scale():
if vertical_scaling_factor<5:
linewidth=0.5
else:
linewidth=1.0
branchlength=round_to_n(max_branch_depth/10, 2)*horizontal_scaling_factor
horizontalpos=xoffset+round_to_n(max_branch_depth/10, 2)*horizontal_scaling_factor
vertpos=treebase-fontsize
scalestring = str(round_to_n(max_branch_depth/10, 2))
scalefontsize=fontsize
if scalefontsize<6:
scalefontsize=6
d.add(Line(horizontalpos, vertpos, horizontalpos+branchlength, vertpos, strokeWidth=linewidth))
d.add(String(horizontalpos+(float(branchlength)/2), vertpos-(scalefontsize+1), scalestring, textAnchor='middle', fontSize=scalefontsize, fontName='Helvetica'))
def get_node_vertical_positions():
def get_node_vertical_position(node):
for daughter in treeObject.node(node).succ:
get_node_vertical_position(daughter)
if not treeObject.is_terminal(node):
daughters=treeObject.node(node).succ
if treeObject.node(node).data.comment==None:
treeObject.node(node).data.comment={}
treeObject.node(node).data.comment["vertpos"]=float(treeObject.node(daughters[0]).data.comment["vertpos"]+treeObject.node(daughters[-1]).data.comment["vertpos"])/2
node=treeObject.root
get_node_vertical_position(node)
def drawbranch(node,horizontalpos):
vertpos=treeObject.node(node).data.comment["vertpos"]+yoffset
horizontalpos+=xoffset
branchlength=treeObject.node(node).data.branchlength*horizontal_scaling_factor
if vertical_scaling_factor<5:
linewidth=0.5
else:
linewidth=1.0
if treeObject.node(node).data.comment and "branch_colour" in treeObject.node(node).data.comment:
r,g,b=treeObject.node(node).data.comment["branch_colour"]
branch_colour=colors.Color(float(r)/255,float(g)/255,float(b)/255)
else:
branch_colour=colors.black
if branchlength<linewidth:
branchlength=linewidth
d.add(Line(horizontalpos-(linewidth/2), vertpos, (horizontalpos-(linewidth/2))+branchlength, vertpos, strokeWidth=linewidth, strokeColor=branch_colour))
if node!=treeObject.root:
parentnode=treeObject.node(node).prev
sisters=treeObject.node(parentnode).succ
parentvertpos=treeObject.node(parentnode).data.comment["vertpos"]+yoffset
d.add(Line(horizontalpos, vertpos, horizontalpos, parentvertpos, strokeWidth=linewidth, strokeColor=branch_colour))
if treeObject.is_terminal(node):
if treeObject.node(node).data.comment and "name_colour" in treeObject.node(node).data.comment:
name_colours=[]
for x in range(0,len(treeObject.node(node).data.comment["name_colour"])):
r,g,b= treeObject.node(node).data.comment["name_colour"][x]
name_colours.append(colors.Color(float(r)/255,float(g)/255,float(b)/255))
else:
name_colours=[colors.black]
gubbins_length=0.0
colpos=0
namewidth=get_text_width('Helvetica', fontsize, treeObject.node(node).data.taxon)+name_offset
gubbins_length += namewidth
colpos=1
for x in range(colpos,len(name_colours)):
gubbins_length += block_length
if x!=0:
gubbins_length += vertical_scaling_factor
#Add the taxon names
d.add(String(treewidth+xoffset+(max_name_width-gubbins_length)+(fontsize/2), vertpos-(fontsize/3), treeObject.node(node).data.taxon, textAnchor='start', fontSize=fontsize, fillColor=name_colours[0], fontName='Helvetica'))
block_xpos=treewidth+xoffset+(max_name_width-gubbins_length)+(fontsize/2)+namewidth
# draw dashed lines
d.add(Line(horizontalpos+branchlength, vertpos, treewidth+xoffset+(max_name_width-gubbins_length), vertpos, strokeDashArray=[1, 2], strokeWidth=linewidth/2, strokeColor=name_colours[0]))
def recurse_subtree(node, horizontalpos):
daughters=treeObject.node(node).succ
daughterhorizontalpos=horizontalpos+(treeObject.node(node).data.branchlength*horizontal_scaling_factor)
drawbranch(node,horizontalpos)
for daughter in daughters:
recurse_subtree(daughter,daughterhorizontalpos)
def get_max_name_width(name_offset, fontsize):
max_width=0.0
for taxon in treeObject.get_terminals():
curwidth= get_text_width("Helvetica", fontsize, treeObject.node(taxon).data.taxon)
if curwidth>max_width:
max_width=curwidth
return max_width
fontsize=vertical_scaling_factor
if fontsize>12:
fontsize=12
while get_max_name_width(name_offset, fontsize)+name_offset>treewidth/3:
fontsize-=0.2
max_name_width=get_max_name_width(name_offset, fontsize)+name_offset
colblockstart=1
block_length=0
treewidth-=(max_name_width+(fontsize/2))
max_branch_depth=get_max_branch_depth()
horizontal_scaling_factor=float(treewidth)/max_branch_depth
get_node_vertical_positions()
recurse_subtree(treeObject.root, 0)
treebase=treeObject.node(treeObject.get_terminals()[-1]).data.comment["vertpos"]+yoffset
draw_scale()
return
#################
# Drawing class #
#################
class Figure:
    def __init__(self, beginning, end):
        self.beginning = 0
        self.end = -1
###############
# Track class #
###############
class Track:
def __init__(self, track_position=[-1,-1], track_height=0, track_length=0, track_draw_proportion=0.75, scale=False, tick_marks=True, tick_mark_number=5, tick_mark_labels=True, minor_tick_marks=True, minor_tick_mark_number=3, features=[], beginning=0, end=-1):
self.track_position=track_position#horizontal and vertical position of centre of track
self.track_height=track_height#height of space allocated for track
self.track_length=track_length
self.track_draw_proportion=track_draw_proportion#proportion of the track that should be used for drawing
self.scale=scale
self.scale_position="middle"
self.tick_marks=tick_marks
self.tick_mark_number=tick_mark_number
self.tick_mark_labels=tick_mark_labels
self.tick_mark_label_font="Helvetica"
self.tick_mark_label_size=8
self.tick_mark_label_angle=45
self.minor_tick_marks=minor_tick_marks
self.minor_tick_mark_number=minor_tick_mark_number
self.features=features[:]
self.scaled_features=features[:]
self.draw_feature_labels=False
self.feature_label_size=8
self.feature_label_angle=0
self.feature_label_font="Helvetica"
self.greytrack=False
self.grey_track_colour=colors.Color(0.25,0.25,0.25)
self.grey_track_opacity_percent=10
self.max_feature_length=-1
self.beginning=0
self.end=-1
self.track_number=-1
self.plots=[]
self.fragments=1
self.name=""
self.show_name=False
self.name_font="Helvetica"
self.name_size=10
self.name_length=0
self.is_key=False
self.key_data=[]
def get_max_feature_length(self):
max_feature_length=0
for feature in self.features:
for location in feature.feature_locations:
if location[0]>max_feature_length:
max_feature_length=location[0]
if location[1]>max_feature_length:
max_feature_length=location[1]
return max_feature_length
def scale_feature_positions(self):
self.scaled_features=[]
if self.end!=-1:
length=float(self.end-self.beginning)
else:
length=float(self.max_feature_length-self.beginning)
for feature in self.features:
newfeature=Feature()
newfeature.fillcolour=feature.fillcolour
newfeature.strokecolour=feature.strokecolour
newfeature.strokeweight=feature.strokeweight
newfeature.strand=feature.strand
newfeature.label=feature.label
newfeature.arrows=feature.arrows
scaledlocations=[]
for location in feature.feature_locations:
start=location[0]
finish=location[1]
if self.beginning!=0:
if start<self.beginning and finish>self.beginning:
start=self.beginning
if self.end!=-1:
if start<self.end and finish>self.end:
finish=self.end
start-=self.beginning
finish-=self.beginning
scaledlocations.append(((float(start)/length)*self.track_length,(float(finish)/length)*self.track_length))
newfeature.feature_locations=scaledlocations
self.scaled_features.append(newfeature)
def draw_features(self):
if self.max_feature_length==-1:
return
else:
self.scale_feature_positions()
featuresort=[]
for x, feature in enumerate(self.scaled_features):
featuresort.append([feature.feature_locations[0][0], x])
joins=[]
for featurenum in featuresort[::-1]:
feature=self.scaled_features[featurenum[1]]
#if the feature is white, outline it in black so we can see it
if feature.strokecolour==colors.Color(1,1,1,1):
feature.strokecolour=colors.Color(0,0,0,1)
subfeaturesort=[]
for x, subfeature in enumerate(feature.feature_locations):
subfeaturesort.append([subfeature[0], x])
subfeaturesort.sort()
subfeature_locations=[]
for subfeaturenum in subfeaturesort:
subfeature_locations.append(feature.feature_locations[subfeaturenum[1]])
for x, location in enumerate(subfeature_locations):
if (location[0]>0 and location[0]<=self.track_length) or (location[1]>0 and location[1]<=self.track_length):
y=self.track_position[1]-((float(self.track_height)/4)*self.track_draw_proportion)
height=(float(self.track_height)*self.track_draw_proportion)/2
y1=self.track_position[1]
y2=self.track_position[1]+((float(self.track_height)/8)*self.track_draw_proportion)
if feature.arrows==0:
d.add(Rect(self.track_position[0]+location[0], y, location[1]-location[0], height, fillColor=feature.fillcolour, strokeColor=feature.strokecolour, strokeWidth=feature.strokeweight))
if len(subfeature_locations)>x+1 and subfeature_locations[x+1][0]<=self.track_length:
if subfeature_locations[x+1][0]<location[1]:
joinheight=y1
elif y2>y1:
if (y2-y1)>(float(subfeature_locations[x+1][0]-location[1])/2):
joinheight=y1+(float(subfeature_locations[x+1][0]-location[1])/2)
else:
joinheight=y2
else:
if (y1-y2)>(float(subfeature_locations[x+1][0]-location[1])/2):
joinheight=y1-(float(subfeature_locations[x+1][0]-location[1])/2)
else:
joinheight=y2
joins.append(Line(self.track_position[0]+location[1], y1, self.track_position[0]+location[1]+(float(subfeature_locations[x+1][0]-location[1])/2), joinheight, strokeDashArray=[0.5, 1], strokeWidth=0.5))
joins.append(Line(self.track_position[0]+((location[1]+subfeature_locations[x+1][0])/2), joinheight, self.track_position[0]+location[1]+(float(subfeature_locations[x+1][0]-location[1])), y1, strokeDashArray=[0.5, 1], strokeWidth=0.5))
for join in joins:
d.add(join)
self.scaled_features=[]
def draw_track(self):
self.draw_features()
def add_feature(self,locations=[(-1,-1)], fillcolour=colors.white, strokecolour=colors.black, strokeweight=0, label="", strand=0, arrows=0):
newfeature=Feature()
feature_locations=[]
for location in locations:
if location[0]>location[1]:
feature_locations.append((location[1],location[0]))
else:
feature_locations.append((location[0],location[1]))
newfeature.feature_locations=feature_locations
newfeature.fillcolour=fillcolour
newfeature.strokecolour=strokecolour
newfeature.strokeweight=strokeweight
newfeature.strand=strand
newfeature.label=label
newfeature.arrows=arrows
self.features.append(newfeature)
def sort_features_by_length(self):
featurelist=[]
ordered_features=[]
for x, feature in enumerate(self.features):
featurelist.append([feature.feature_locations[-1][1]-feature.feature_locations[0][0], x])
featurelist.sort()
#featurelist.reverse()
for feature in featurelist:
ordered_features.append(self.features[feature[1]])
self.features=ordered_features[:]
#################
# Feature class #
#################
class Feature:
def __init__(self):
self.feature_locations=[(-1,-1)]
self.strand=0
self.arrows=0
self.label=""
self.fillcolour=colors.blue
self.strokecolour=colors.black
self.strokeweight=0
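# Minimal usage sketch for the helpers above (coordinates and colour are
# illustrative; in the script itself tracks are built by
# add_ordered_embl_to_diagram/add_ordered_tab_to_diagram):
#
#   trk = Track()
#   trk.name = "taxon_1"
#   trk.add_feature(locations=[(1200, 3400)], fillcolour=colors.red,
#                   strokecolour=colors.red)
#   trk.max_feature_length = 5000   # reference length used for scaling
#   trk.track_length = 500          # horizontal space available, in points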
################
# Main program #
################
if __name__ == "__main__":
options = main()
pagesize=pagesizes.A4
height, width = pagesize
if len(options.embl_file)==0:
print("Found nothing to draw")
sys.exit()
d = Drawing(width, height)
margin=0.5*inch
metadatanames={}
namecolours={}
colour_dict=[]
my_tracks={}
#create translator object for translating artemis colours to GenomeDiagram colours
translator = ColorTranslator()
track_count=0
tree_positions=[]
track_names={}
input_order=[]
new_tracks=add_ordered_tab_to_diagram(options.embl_file)
for track in new_tracks:
newtrack=new_tracks[track]
newtrack.beginning=0
newtrack.name=new_tracks[track].name
name=newtrack.name
x=1
while name in my_tracks:
name=newtrack.name+"_"+str(x)
x+=1
if not newtrack.name in track_names:
track_names[newtrack.name]=[]
input_order.append(name)
track_names[newtrack.name].append(name)
track_count+=1
newtrack.track_height=1
my_tracks[name]=newtrack
treenames=[]
tree_name_to_node={}
listnames=[]
if options.tree!="":
if not os.path.isfile(options.tree):
print("Cannot find file:", options.tree)
options.tree=""
else:
treestring=open(options.tree,"rU").read().strip()
tree=Trees.Tree(treestring, rooted=True)
tree.root
treeterminals=tree.get_terminals()
totalbr=0.0
for terminal_node in treeterminals:
terminal=tree.node(terminal_node).data.taxon
treenames.append(terminal)
if not terminal in track_names:
track_count+=1
tree_name_to_node[terminal]=terminal_node
tree.node(terminal_node).data.comment={}
tree.node(terminal_node).data.comment["name_colour"]=[(0,0,0)]
#from this we can work out a constant for the height of a track which takes into account the height of the page and margin sizes
vertical_scaling_factor=float(height-(margin*2))/(track_count)
#to make sure names can be printed in the space of a track, we can scale the name to the same size as the vertical scaling factor, but limit it to 12pt so it doesn't get crazily big
name_font_size=vertical_scaling_factor
if name_font_size>12:
name_font_size=12
left_proportion=0.3
treetrack=0
output_order=treenames[::-1]
for name in input_order[::-1]:
if not name in treenames:
output_order.append(name)
track_number=0
for track in output_order:
if(track not in my_tracks):
my_tracks = add_empty_track(my_tracks, track)
track_height=my_tracks[track].track_height
my_tracks[track].track_draw_proportion=0.8
my_tracks[track].track_height=track_height*vertical_scaling_factor
if left_proportion==1:
my_tracks[track].track_length=(width-margin)-((width-(margin*2))*0.2+margin)
my_tracks[track].track_position=[(width-(margin*2))*0.2+margin, margin+((track_number)*vertical_scaling_factor)+float((my_tracks[track].track_height)/2)]
else:
my_tracks[track].track_length=(width-margin)-((width-(margin*2))*left_proportion+margin)
my_tracks[track].track_position=[(width-(margin*2))*left_proportion+margin, margin+((track_number)*vertical_scaling_factor)+float((my_tracks[track].track_height)/2)]
my_tracks[track].track_number=track_number
if track in treenames:
tree.node(tree_name_to_node[track]).data.comment["vertpos"]=margin+((track_number)*vertical_scaling_factor)+float((my_tracks[track].track_height)/2)
my_tracks[track].grey_track_colour=colors.Color(0,0,0)
track_number+=track_height
#find the maximum feature endpoint to scale by
max_feature_length=0
for track in my_tracks:
max_track_feature_length=my_tracks[track].get_max_feature_length()
if max_track_feature_length>max_feature_length:
max_feature_length=max_track_feature_length
for plot in my_tracks[track].plots:
for data in plot.xdata:
if data[-1]>max_feature_length:
max_feature_length=data[-1]
#tell each track what the max feature length is
for track in my_tracks:
if my_tracks[track].max_feature_length<max_feature_length:
my_tracks[track].max_feature_length=max_feature_length
beginning=0
end=max_feature_length
for track in output_order:
if not track in my_tracks or (my_tracks[track].is_key and fragment!=1) or my_tracks[track].track_length==0:
continue
my_tracks[track].beginning=beginning
my_tracks[track].end=end
my_tracks[track].track_position[1]=margin+(((my_tracks[track].track_number)*vertical_scaling_factor)+(my_tracks[track].track_height)/2)
my_tracks[track].sort_features_by_length()
my_tracks[track].draw_track()
if options.tree!="":
drawtree(tree, height-(margin*2), (width-(margin*2))*left_proportion, margin, 0, 5)
renderPDF.drawToFile(d, options.outputfile)
class DrawerError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| gpl-2.0 | -7,902,293,239,757,884,000 | 31.198773 | 323 | 0.685657 | false | 3.227401 | false | false | false |
jupyter/jupyterlab | examples/cell/main.py | 4 | 2644 | """
An example demonstrating a stand-alone "notebook".
Copyright (c) Jupyter Development Team.
Distributed under the terms of the Modified BSD License.
Example
-------
To run the example, see the instructions in the README to build it. Then
run ``python main.py``.
"""
import os
import json
from jupyterlab_server import LabServerApp
from jupyter_server.base.handlers import JupyterHandler
from jupyter_server.extension.handler import ExtensionHandlerMixin, ExtensionHandlerJinjaMixin
from jupyter_server.utils import url_path_join as ujoin
HERE = os.path.dirname(__file__)
with open(os.path.join(HERE, 'package.json')) as fid:
version = json.load(fid)['version']
def _jupyter_server_extension_points():
return [
{
'module': __name__,
'app': ExampleApp
}
]
class ExampleHandler(
ExtensionHandlerJinjaMixin,
ExtensionHandlerMixin,
JupyterHandler
):
"""Handle requests between the main app page and notebook server."""
def get(self):
"""Get the main page for the application's interface."""
config_data = {
# Use camelCase here, since that's what the lab components expect
"appVersion": version,
'baseUrl': self.base_url,
'token': self.settings['token'],
'fullStaticUrl': ujoin(self.base_url, 'static', self.name),
'frontendUrl': ujoin(self.base_url, 'example/'),
}
return self.write(
self.render_template(
'index.html',
static=self.static_url,
base_url=self.base_url,
token=self.settings['token'],
page_config=config_data
)
)
class ExampleApp(LabServerApp):
extension_url = '/example'
default_url = '/example'
app_url = "/example"
name = __name__
load_other_extensions = False
app_name = 'JupyterLab Example Cell'
static_dir = os.path.join(HERE, 'build')
templates_dir = os.path.join(HERE, 'templates')
app_version = version
app_settings_dir = os.path.join(HERE, 'build', 'application_settings')
schemas_dir = os.path.join(HERE, 'build', 'schemas')
themes_dir = os.path.join(HERE, 'build', 'themes')
user_settings_dir = os.path.join(HERE, 'build', 'user_settings')
workspaces_dir = os.path.join(HERE, 'build', 'workspaces')
def initialize_handlers(self):
"""Add example handler to Lab Server's handler list.
"""
self.handlers.append(
('/example', ExampleHandler)
)
if __name__ == '__main__':
ExampleApp.launch_instance()
| bsd-3-clause | -5,810,901,252,969,317,000 | 28.707865 | 94 | 0.621785 | false | 3.820809 | false | false | false |
cjvogl/finite_volume_seismic_model | 3d/setplot_pwaves.py | 1 | 3701 |
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
import numpy as np
import os, shutil
from mapping import Mapping
import dtopotools_horiz_okada_and_1d as dtopotools
length_scale = 1.0e-3 # m to km
xlimits = [-150.0e3*length_scale,200.0e3*length_scale]
zlimits = [-175.0e3*length_scale,0.0]
#--------------------------
def setplot(plotdata):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of clawpack.visclaw.data.ClawPlotData.
Output: a modified version of plotdata.
"""
slice_number = 3
tmpdir = os.path.abspath(os.curdir)
os.chdir(plotdata.outdir)
for filename in os.listdir('.'):
if (filename.startswith('slice_%d' % slice_number)):
shutil.copyfile(filename,filename.replace('slice_%d' % slice_number,'fort',1))
fault = dtopotools.Fault()
fault.read('fault.data')
os.chdir(tmpdir)
mapping = Mapping(fault)
xp1 = mapping.xp1*length_scale
xp2 = mapping.xp2*length_scale
zp1 = mapping.zp1*length_scale
zp2 = mapping.zp2*length_scale
xcenter = mapping.xcenter
ycenter = mapping.ycenter
def mapc2p(xc,yc):
xp,yp = mapping.mapc2p_xz(xc,yc)
return xp*length_scale,yp*length_scale
def plot_fault(current_data):
from pylab import linspace, plot, xlabel, ylabel, tick_params
xl = linspace(xp1,xp2,100)
zl = linspace(zp1,zp2,100)
plot(xl,zl,'g',linewidth=3)
tick_params(labelsize=25)
xlabel('kilometers',fontsize=25)
ylabel('kilometers',fontsize=25)
from clawpack.visclaw import colormaps
plotdata.clearfigures() # clear any old figures,axes,items data
#plotdata.format = 'binary'
def sigmatr(current_data):
# return -trace(sigma)
q = current_data.q
return -(q[0,:,:] + q[1,:,:] + q[2,:,:])
# Figure for trace(sigma)
plotfigure = plotdata.new_plotfigure(name='fault', figno=1)
plotfigure.kwargs = {'figsize':(11,6)}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = xlimits
plotaxes.ylimits = zlimits
plotaxes.title_with_t = False
plotaxes.title = ''
plotaxes.scaled = True
plotaxes.afteraxes = plot_fault
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = sigmatr
plotitem.pcolor_cmap = colormaps.blue_white_red
plotitem.pcolor_cmin = -1e6
plotitem.pcolor_cmax = 1e6
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0]
plotitem.amr_patchedges_show = [0]
plotitem.MappedGrid = True
plotitem.mapc2p = mapc2p
# Parameters used only when creating html and/or latex hardcopy
# e.g., via clawpack.visclaw.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
# plotdata.parallel = True
return plotdata
| gpl-3.0 | -4,647,494,930,394,782,000 | 31.464912 | 90 | 0.640367 | false | 3.272325 | false | false | false |
VitorHugoAguiar/ProBot | ProBot_BeagleBone/PIDControllersFile.py | 1 | 4310 | #!/usr/bin/python
# Python Standard Library Imports
import time
# Local files
import ProBotConstantsFile
# Initialization of classes from local files
Pconst = ProBotConstantsFile.Constants()
# PID functions
class PIDControllersClass():
# Build a constructor
def __init__(self):
self.error = 0
self.SaberTooth_KpV = 280
self.SaberTooth_KiV = 0.6
self.SaberTooth_KdV = 12
self.SaberTooth_KpA = 18
self.SaberTooth_KiA = 2.2
self.SaberTooth_KdA = -2
self.PWM_KpV = 75
self.PWM_KiV = 0.6
self.PWM_KdV = 0.2
self.PWM_KpA = 9
self.PWM_KiA = 3
self.PWM_KdA = -0.001
self.limitV = 800
self.limitA = 1000
self.integrated_error_V1 = 0
self.integrated_error_V2 = 0
self.integrated_error_A1 = 0
self.integrated_error_A2 = 0
self.last_error_V1 = 0
self.last_error_V2 = 0
self.last_error_A1 = 0
self.last_error_A2 = 0
def standardPID(self, reference, measured, type, userChoice):
self.error = float(reference - measured)
# Load the right values for the controllers, depending on if we are using Sabertooth of PWM controller
if userChoice=='1':
KpV = self.SaberTooth_KpV
KiV = self.SaberTooth_KiV
KdV = self.SaberTooth_KdV
KpA = self.SaberTooth_KpA
KiA = self.SaberTooth_KiA
KdA = self.SaberTooth_KdA
if userChoice=='2':
KpV = self.PWM_KpV
KiV = self.PWM_KiV
KdV = self.PWM_KdV
KpA = self.PWM_KpA
KiA = self.PWM_KiA
KdA = self.PWM_KdA
# Loading the variables for the controllers
typeController = {
'Velocity1': [KpV, KiV, KdV, self.limitV, self.integrated_error_V1, self.last_error_V1],
'Velocity2': [KpV, KiV, KdV, self.limitV, self.integrated_error_V2, self.last_error_V2],
'Angle1': [KpA, KiA, KdA, self.limitA, self.integrated_error_A1, self.last_error_A1],
'Angle2': [KpA, KiA, KdA, self.limitA, self.integrated_error_A2, self.last_error_A2]}
controllerVar = typeController[type]
# Code for the PID controllers
pTerm = float(controllerVar[0] * self.error)
controllerVar[4] += float(self.error)
# Limiting the integrated error, avoiding windup
controllerVar[4] = max(-controllerVar[3], min(controllerVar[4], controllerVar[3]))
iTerm = float(controllerVar[1] * controllerVar[4])
dTerm = float(controllerVar[2] * (self.error - controllerVar[5]))
controllerVar[5] = self.error
PID_result = float(pTerm + iTerm + dTerm)
# Updating the integrated error and the last error for the next loop
        if type == 'Velocity1':
            self.integrated_error_V1 = controllerVar[4]
            self.last_error_V1 = controllerVar[5]
        if type == 'Velocity2':
            self.integrated_error_V2 = controllerVar[4]
            self.last_error_V2 = controllerVar[5]
        if type == 'Angle1':
            self.integrated_error_A1 = controllerVar[4]
            self.last_error_A1 = controllerVar[5]
        if type == 'Angle2':
            self.integrated_error_A2 = controllerVar[4]
            self.last_error_A2 = controllerVar[5]
        # Limit the controller output once, depending on the selected motor driver
        if userChoice == '1':
            PID_result = max(-127, min(PID_result, 127))  # Sabertooth range (-127, 127)
        if userChoice == '2':
            PID_result = max(-100, min(PID_result, 100))  # Limiting the percentage of the PWM
        return -PID_result
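# Illustrative usage sketch (added for clarity; not part of the original ProBot
# code, and the setpoint/measurement values below are made up). userChoice '1'
# selects the Sabertooth gains and the +/-127 output range, '2' the PWM gains
# and a +/-100 duty-cycle range:
#
#   pid = PIDControllersClass()
#   correction = pid.standardPID(2.0, 1.73, 'Velocity1', '2')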
| agpl-3.0 | -3,683,972,053,555,202,600 | 39.280374 | 163 | 0.547796 | false | 3.420635 | false | false | false |
FarzanHajian/CreateSwap | src/createswap2.py | 1 | 4347 | #!/usr/bin/env python
# encoding: utf-8
'''
create_swap.py
A Python 2 script for creating and removing Linux swap files.
Copyright (C) 2016 Farzan Hajian
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
@author: Farzan Hajian
@copyright: 2016. All rights reserved.
@license: GPL3
@contact: [email protected]
NOTE:
THIS SCRIPT WORKS ONLY WITH PYTHON VERSION 2.
FOR PYTHON 3, USE "createswap.py".
'''
import sys
import os
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', help='the file full name on which the swap space is going to be built (must be used with --size option)')
parser.add_argument('-s', '--size', help='size of the swap space in megabytes (must be used with --file option)', type=int)
parser.add_argument('-o', '--off', help='removes the swap file and disables its swap space', metavar='FILE')
parser.add_argument('--verbose', help='executes in the verbose mode (useful for tracking errors)', action='store_true')
args = parser.parse_args()
try:
if not args.file and not args.size and not args.off:
if not args.verbose:
parser.print_help()
raise Exception()
else:
raise Exception("--verbose option cannot be used alone")
if(args.file and not args.size) or (not args.file and args.size):
raise Exception("--file and --size options must be used together")
if args.off and (args.file or args.size):
raise Exception("--off option cannot be used with other options")
except Exception as ex:
show_error(ex, 3)
return args
def is_verbose():
return args.verbose
def print_header():
os.system('clear')
print('-'*50)
print('createswap.py v 2.0 (Python 2)\n')
print('This program is published under GPL v3 license')
print('You can contact me at [email protected]')
print('-'*50)
def print_step(message):
if is_verbose():
print ("")
print '%-40.40s'%message
else:
print '%-40.40s'%message,
def print_status(is_failed=False):
status = ('Failed' if is_failed else 'OK')
print('[%s]'%status)
def show_error(exception, exit_code):
print('\n%s'%exception)
sys.exit(exit_code)
def sudo():
os.system('sudo id > /dev/null')
def exec_step(message, command, arg_tuple=None):
print_step(message)
command = 'sudo ' + command
if not is_verbose(): command += ' > /dev/null 2>&1'
if arg_tuple != None:
exit_code = os.system(command.format(*arg_tuple))
else:
exit_code = os.system(command)
if exit_code == 0:
print_status()
else:
print_status(True)
def create_swap(filename, size):
try:
tuple1 = (filename, size)
tuple2 = (filename,)
exec_step('Creating the file', 'dd if=/dev/zero of={} bs=1M count={}', tuple1)
exec_step('Setting the file access mode', 'chmod 600 {}', tuple2)
exec_step('Setting up the swap space', 'mkswap {}', tuple2)
exec_step('Enabling the swap space', 'swapon {}', tuple2)
except Exception as ex:
show_error(ex, 2)
def drop_swap(filename):
try:
tuple1 = (filename,)
exec_step('Disabling the swap space', 'swapoff {}', tuple1)
exec_step('Removing the file', 'rm {}', tuple1)
except Exception as ex:
show_error(ex, 2)
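# Typical invocations (added usage sketch, not part of the original script;
# the file name and size are only examples):
#
#   ./createswap2.py --file /swapfile --size 1024   # create and enable a 1024 MB swap file
#   ./createswap2.py --off /swapfile                # disable and remove it
#   add --verbose to either call to print the underlying commands as they run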
# program entry point
print_header()
args = parse_args()
sudo()
if args.file:
create_swap(args.file, args.size)
elif args.off:
drop_swap(args.off)
print("")
| gpl-3.0 | -7,813,304,307,696,668,000 | 30.5 | 145 | 0.635381 | false | 3.72813 | false | false | false |
hbldh/sudokuextract | sudokuextract/imgproc/binary.py | 1 | 1736 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`binary`
==================
Created by hbldh <[email protected]>
Created on 2016-01-26
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
from skimage.transform import resize
from skimage.filters import threshold_otsu
from skimage.filters import gaussian_filter, threshold_adaptive
def to_binary_otsu(img, invert=False):
if img.dtype == np.bool:
img = np.array(img, 'uint8')
if img.max() == img.min():
if img.min() == 1:
return np.array(img * 255, 'uint8')
else:
return np.array(img, 'uint8')
else:
t = threshold_otsu(img)
img[img <= t] = 255 if invert else 0
img[img > t] = 0 if invert else 255
return np.array(img, 'uint8')
def to_binary_adaptive(img):
sigma = 1.0
m = max(img.shape)
if m > 2000:
block_size = 80
elif m > 1500:
block_size = 50
elif m > 1000:
block_size = 35
else:
block_size = 20
bimg = gaussian_filter(img, sigma=sigma)
bimg = threshold_adaptive(bimg, block_size, offset=2 / 255)
bimg = np.array(bimg, 'uint8') * 255
return bimg
def add_border(img, size=(28, 28), border_size=0, background_value=255):
img = resize(img, (size[0] - border_size * 2,
size[1] - border_size * 2))
img = np.array(img * 255, 'uint8')
output_img = np.ones(size, 'uint8') * background_value
if border_size == 0:
output_img[:, :] = img
else:
output_img[border_size:-border_size, border_size:-border_size] = img
return output_img
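def _example_binarize_and_pad(image):
    # Added illustrative sketch (not part of the original package): binarize a
    # greyscale uint8 image with Otsu's threshold, then resize and pad it to
    # the 28x28 shape used for digit images. `image` is assumed to be a 2D
    # numpy array.
    binary = to_binary_otsu(image)
    return add_border(binary, size=(28, 28), border_size=2)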
| mit | 2,918,771,141,372,004,400 | 24.910448 | 76 | 0.597926 | false | 3.197053 | false | false | false |
GerbenJavado/LinkFinder | linkfinder.py | 1 | 13951 | #!/usr/bin/env python
# Python 3
# LinkFinder
# By Gerben_Javado
# Fix webbrowser bug for MacOS
import os
os.environ["BROWSER"] = "open"
# Import libraries
import re, sys, glob, html, argparse, jsbeautifier, webbrowser, subprocess, base64, ssl, xml.etree.ElementTree
from gzip import GzipFile
from string import Template
try:
from StringIO import StringIO
readBytesCustom = StringIO
except ImportError:
from io import BytesIO
readBytesCustom = BytesIO
try:
from urllib.request import Request, urlopen
except ImportError:
from urllib2 import Request, urlopen
# Regex used
regex_str = r"""
(?:"|') # Start newline delimiter
(
((?:[a-zA-Z]{1,10}://|//) # Match a scheme [a-Z]*1-10 or //
[^"'/]{1,}\. # Match a domainname (any character + dot)
[a-zA-Z]{2,}[^"']{0,}) # The domainextension and/or path
|
((?:/|\.\./|\./) # Start with /,../,./
[^"'><,;| *()(%%$^/\\\[\]] # Next character can't be...
[^"'><,;|()]{1,}) # Rest of the characters can't be
|
([a-zA-Z0-9_\-/]{1,}/ # Relative endpoint with /
[a-zA-Z0-9_\-/]{1,} # Resource name
\.(?:[a-zA-Z]{1,4}|action) # Rest + extension (length 1-4 or action)
(?:[\?|#][^"|']{0,}|)) # ? or # mark with parameters
|
([a-zA-Z0-9_\-/]{1,}/ # REST API (no extension) with /
[a-zA-Z0-9_\-/]{3,} # Proper REST endpoints usually have 3+ chars
(?:[\?|#][^"|']{0,}|)) # ? or # mark with parameters
|
([a-zA-Z0-9_\-]{1,} # filename
\.(?:php|asp|aspx|jsp|json|
action|html|js|txt|xml) # . + extension
(?:[\?|#][^"|']{0,}|)) # ? or # mark with parameters
)
(?:"|') # End newline delimiter
"""
context_delimiter_str = "\n"
def parser_error(errmsg):
'''
Error Messages
'''
print("Usage: python %s [Options] use -h for help" % sys.argv[0])
print("Error: %s" % errmsg)
sys.exit()
def parser_input(input):
'''
Parse Input
'''
# Method 1 - URL
if input.startswith(('http://', 'https://',
'file://', 'ftp://', 'ftps://')):
return [input]
# Method 2 - URL Inspector Firefox
if input.startswith('view-source:'):
return [input[12:]]
# Method 3 - Burp file
if args.burp:
jsfiles = []
items = xml.etree.ElementTree.fromstring(open(args.input, "r").read())
for item in items:
jsfiles.append({"js":base64.b64decode(item.find('response').text).decode('utf-8',"replace"), "url":item.find('url').text})
return jsfiles
# Method 4 - Folder with a wildcard
if "*" in input:
paths = glob.glob(os.path.abspath(input))
for index, path in enumerate(paths):
paths[index] = "file://%s" % path
return (paths if len(paths) > 0 else parser_error('Input with wildcard does \
not match any files.'))
# Method 5 - Local file
path = "file://%s" % os.path.abspath(input)
return [path if os.path.exists(input) else parser_error("file could not \
be found (maybe you forgot to add http/https).")]
def send_request(url):
'''
    Send a request for the URL using urllib, with browser-like headers
'''
q = Request(url)
q.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36')
q.add_header('Accept', 'text/html,\
application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
q.add_header('Accept-Language', 'en-US,en;q=0.8')
q.add_header('Accept-Encoding', 'gzip')
q.add_header('Cookie', args.cookies)
try:
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
response = urlopen(q, timeout=args.timeout, context=sslcontext)
except:
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
response = urlopen(q, timeout=args.timeout, context=sslcontext)
if response.info().get('Content-Encoding') == 'gzip':
data = GzipFile(fileobj=readBytesCustom(response.read())).read()
elif response.info().get('Content-Encoding') == 'deflate':
data = response.read().read()
else:
data = response.read()
return data.decode('utf-8', 'replace')
def getContext(list_matches, content, include_delimiter=0, context_delimiter_str="\n"):
'''
Parse Input
list_matches: list of tuple (link, start_index, end_index)
content: content to search for the context
include_delimiter Set 1 to include delimiter in context
'''
items = []
for m in list_matches:
match_str = m[0]
match_start = m[1]
match_end = m[2]
context_start_index = match_start
context_end_index = match_end
delimiter_len = len(context_delimiter_str)
content_max_index = len(content) - 1
while content[context_start_index] != context_delimiter_str and context_start_index > 0:
context_start_index = context_start_index - 1
while content[context_end_index] != context_delimiter_str and context_end_index < content_max_index:
context_end_index = context_end_index + 1
if include_delimiter:
context = content[context_start_index: context_end_index]
else:
context = content[context_start_index + delimiter_len: context_end_index]
item = {
"link": match_str,
"context": context
}
items.append(item)
return items
def parser_file(content, regex_str, mode=1, more_regex=None, no_dup=1):
'''
Parse Input
content: string of content to be searched
regex_str: string of regex (The link should be in the group(1))
mode: mode of parsing. Set 1 to include surrounding contexts in the result
more_regex: string of regex to filter the result
no_dup: remove duplicated link (context is NOT counted)
Return the list of ["link": link, "context": context]
The context is optional if mode=1 is provided.
'''
global context_delimiter_str
if mode == 1:
# Beautify
if len(content) > 1000000:
content = content.replace(";",";\r\n").replace(",",",\r\n")
else:
content = jsbeautifier.beautify(content)
regex = re.compile(regex_str, re.VERBOSE)
if mode == 1:
all_matches = [(m.group(1), m.start(0), m.end(0)) for m in re.finditer(regex, content)]
items = getContext(all_matches, content, context_delimiter_str=context_delimiter_str)
else:
items = [{"link": m.group(1)} for m in re.finditer(regex, content)]
if no_dup:
# Remove duplication
all_links = set()
no_dup_items = []
for item in items:
if item["link"] not in all_links:
all_links.add(item["link"])
no_dup_items.append(item)
items = no_dup_items
# Match Regex
filtered_items = []
for item in items:
# Remove other capture groups from regex results
if more_regex:
if re.search(more_regex, item["link"]):
filtered_items.append(item)
else:
filtered_items.append(item)
return filtered_items
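# Added illustrative sketch (not part of the original tool): parser_file() can
# be exercised directly on an inline JavaScript string; with mode=0 only the
# links are returned, without beautification or surrounding context.
def _example_parser_file():
    sample_js = 'var a = "/api/v1/users"; var b = "https://example.com/static/app.js";'
    return parser_file(sample_js, regex_str, mode=0)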
def cli_output(endpoints):
'''
Output to CLI
'''
for endpoint in endpoints:
print(html.escape(endpoint["link"]).encode(
'ascii', 'ignore').decode('utf8'))
def html_save(html):
'''
Save as HTML file and open in the browser
'''
hide = os.dup(1)
os.close(1)
os.open(os.devnull, os.O_RDWR)
try:
s = Template(open('%s/template.html' % sys.path[0], 'r').read())
text_file = open(args.output, "wb")
text_file.write(s.substitute(content=html).encode('utf8'))
text_file.close()
print("URL to access output: file://%s" % os.path.abspath(args.output))
file = "file:///%s" % os.path.abspath(args.output)
if sys.platform == 'linux' or sys.platform == 'linux2':
subprocess.call(["xdg-open", file])
else:
webbrowser.open(file)
except Exception as e:
print("Output can't be saved in %s \
due to exception: %s" % (args.output, e))
finally:
os.dup2(hide, 1)
def check_url(url):
nopelist = ["node_modules", "jquery.js"]
if url[-3:] == ".js":
words = url.split("/")
for word in words:
if word in nopelist:
return False
if url[:2] == "//":
url = "https:" + url
if url[:4] != "http":
if url[:1] == "/":
url = args.input + url
else:
url = args.input + "/" + url
return url
else:
return False
if __name__ == "__main__":
# Parse command line
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--domain",
help="Input a domain to recursively parse all javascript located in a page",
action="store_true")
parser.add_argument("-i", "--input",
help="Input a: URL, file or folder. \
For folders a wildcard can be used (e.g. '/*.js').",
required="True", action="store")
parser.add_argument("-o", "--output",
help="Where to save the file, \
including file name. Default: output.html",
action="store", default="output.html")
parser.add_argument("-r", "--regex",
help="RegEx for filtering purposes \
against found endpoint (e.g. ^/api/)",
action="store")
parser.add_argument("-b", "--burp",
help="",
action="store_true")
parser.add_argument("-c", "--cookies",
help="Add cookies for authenticated JS files",
action="store", default="")
default_timeout = 10
parser.add_argument("-t", "--timeout",
help="How many seconds to wait for the server to send data before giving up (default: " + str(default_timeout) + " seconds)",
default=default_timeout, type=int, metavar="<seconds>")
args = parser.parse_args()
if args.input[-1:] == "/":
args.input = args.input[:-1]
mode = 1
if args.output == "cli":
mode = 0
# Convert input to URLs or JS files
urls = parser_input(args.input)
# Convert URLs to JS
output = ''
for url in urls:
if not args.burp:
try:
file = send_request(url)
except Exception as e:
parser_error("invalid input defined or SSL error: %s" % e)
else:
file = url['js']
url = url['url']
endpoints = parser_file(file, regex_str, mode, args.regex)
if args.domain:
for endpoint in endpoints:
endpoint = html.escape(endpoint["link"]).encode('ascii', 'ignore').decode('utf8')
endpoint = check_url(endpoint)
if endpoint is False:
continue
print("Running against: " + endpoint)
print("")
try:
file = send_request(endpoint)
new_endpoints = parser_file(file, regex_str, mode, args.regex)
if args.output == 'cli':
cli_output(new_endpoints)
else:
output += '''
<h1>File: <a href="%s" target="_blank" rel="nofollow noopener noreferrer">%s</a></h1>
''' % (html.escape(endpoint), html.escape(endpoint))
for endpoint2 in new_endpoints:
                            link = html.escape(endpoint2["link"])
                            header = "<div><a href='%s' class='text'>%s" % (
                                html.escape(link),
                                html.escape(link)
                            )
body = "</a><div class='container'>%s</div></div>" % html.escape(
endpoint2["context"]
)
body = body.replace(
html.escape(endpoint2["link"]),
"<span style='background-color:yellow'>%s</span>" %
html.escape(endpoint2["link"])
)
output += header + body
except Exception as e:
print("Invalid input defined or SSL error for: " + endpoint)
continue
if args.output == 'cli':
cli_output(endpoints)
else:
output += '''
<h1>File: <a href="%s" target="_blank" rel="nofollow noopener noreferrer">%s</a></h1>
''' % (html.escape(url), html.escape(url))
for endpoint in endpoints:
url = html.escape(endpoint["link"])
header = "<div><a href='%s' class='text'>%s" % (
html.escape(url),
html.escape(url)
)
body = "</a><div class='container'>%s</div></div>" % html.escape(
endpoint["context"]
)
body = body.replace(
html.escape(endpoint["link"]),
"<span style='background-color:yellow'>%s</span>" %
html.escape(endpoint["link"])
)
output += header + body
if args.output != 'cli':
html_save(output)
| mit | -3,957,937,958,894,511,600 | 33.70398 | 149 | 0.514587 | false | 3.932074 | false | false | false |
ownport/ansiblite | src/ansiblite/utils/path.py | 1 | 2926 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from errno import EEXIST
from ansiblite.errors import AnsibleError
from ansiblite.utils._text import to_bytes, to_native, to_text
__all__ = ['unfrackpath', 'makedirs_safe']
def unfrackpath(path, follow=True):
'''
Returns a path that is free of symlinks (if follow=True), environment variables, relative path traversals and symbols (~)
:arg path: A byte or text string representing a path to be canonicalized
:arg follow: A boolean to indicate of symlinks should be resolved or not
:raises UnicodeDecodeError: If the canonicalized version of the path
contains non-utf8 byte sequences.
    :rtype: A text string (unicode on python2, str on python3).
:returns: An absolute path with symlinks, environment variables, and tilde
expanded. Note that this does not check whether a path exists.
example::
'$HOME/../../var/mail' becomes '/var/spool/mail'
'''
if follow:
final_path = os.path.normpath(os.path.realpath(os.path.expanduser(os.path.expandvars(to_bytes(path, errors='surrogate_or_strict')))))
else:
final_path = os.path.normpath(os.path.abspath(os.path.expanduser(os.path.expandvars(to_bytes(path, errors='surrogate_or_strict')))))
return to_text(final_path, errors='surrogate_or_strict')
def makedirs_safe(path, mode=None):
    '''Safe way to create dirs in multiprocess/thread environments.
:arg path: A byte or text string representing a directory to be created
:kwarg mode: If given, the mode to set the directory to
:raises AnsibleError: If the directory cannot be created and does not already exists.
:raises UnicodeDecodeError: if the path is not decodable in the utf-8 encoding.
'''
rpath = unfrackpath(path)
b_rpath = to_bytes(rpath)
if not os.path.exists(b_rpath):
try:
if mode:
os.makedirs(b_rpath, mode)
else:
os.makedirs(b_rpath)
except OSError as e:
if e.errno != EEXIST:
raise AnsibleError("Unable to create local directories(%s): %s" % (to_native(rpath), to_native(e)))
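def _example_usage():
    # Added illustrative sketch (not part of the original module; the path is
    # made up): expand a user-relative path without following symlinks, then
    # make sure the directory exists with restrictive permissions.
    target = unfrackpath('~/.ansiblite/tmp', follow=False)
    makedirs_safe(target, mode=0o700)
    return target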
| gpl-3.0 | -6,000,778,393,953,600,000 | 40.211268 | 141 | 0.698906 | false | 3.880637 | false | false | false |
rigdenlab/SIMBAD | simbad/command_line/simbad_full.py | 1 | 6513 | #!/usr/bin/env python
__author__ = "Adam Simpkin, and Felix Simkovic"
__contributing_authors__ = "Jens Thomas, and Ronan Keegan"
__credits__ = "Daniel Rigden, William Shepard, Charles Ballard, Villi Uski, and Andrey Lebedev"
__date__ = "05 May 2017"
__email__ = "[email protected]"
__version__ = "0.1"
import argparse
import os
import sys
from pyjob.stopwatch import StopWatch
import simbad.command_line
import simbad.exit
import simbad.util
import simbad.util.logging_util
import simbad.util.pyrvapi_results
logger = None
def simbad_argparse():
"""Create the argparse options"""
p = argparse.ArgumentParser(
description="SIMBAD: Sequence Independent Molecular replacement Based on Available Database",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
simbad.command_line._argparse_core_options(p)
simbad.command_line._argparse_job_submission_options(p)
simbad.command_line._argparse_contaminant_options(p)
simbad.command_line._argparse_morda_options(p)
simbad.command_line._argparse_lattice_options(p)
simbad.command_line._argparse_rot_options(p)
simbad.command_line._argparse_mr_options(p)
simbad.command_line._argparse_mtz_options(p)
p.add_argument('mtz', help="The path to the input mtz file")
return p
def main():
"""Main SIMBAD routine"""
args = simbad_argparse().parse_args()
args.work_dir = simbad.command_line.get_work_dir(
args.run_dir, work_dir=args.work_dir, ccp4_jobid=args.ccp4_jobid, ccp4i2_xml=args.ccp4i2_xml
)
log_file = os.path.join(args.work_dir, 'simbad.log')
debug_log_file = os.path.join(args.work_dir, 'debug.log')
global logger
logger = simbad.util.logging_util.setup_logging(args.debug_lvl, logfile=log_file, debugfile=debug_log_file)
if not os.path.isfile(args.amore_exe):
raise OSError("amore executable not found")
gui = simbad.util.pyrvapi_results.SimbadOutput(
args.rvapi_document, args.webserver_uri, args.display_gui, log_file, args.work_dir, ccp4i2_xml=args.ccp4i2_xml, tab_prefix=args.tab_prefix
)
simbad.command_line.print_header()
logger.info("Running in directory: %s\n", args.work_dir)
stopwatch = StopWatch()
stopwatch.start()
end_of_cycle, solution_found, all_results = False, False, {}
while not (solution_found or end_of_cycle):
# =====================================================================================
# Perform the lattice search
solution_found = simbad.command_line._simbad_lattice_search(args)
logger.info("Lattice search completed in %d days, %d hours, %d minutes, and %d seconds",
*stopwatch.lap.time_pretty)
if solution_found and not args.process_all:
logger.info(
"Lucky you! SIMBAD worked its charm and found a lattice match for you.")
continue
elif solution_found and args.process_all:
logger.info(
"SIMBAD thinks it has found a solution however process_all is set, continuing to contaminant search")
else:
logger.info("No results found - lattice search was unsuccessful")
if args.output_pdb and args.output_mtz:
csv = os.path.join(args.work_dir, 'latt/lattice_mr.csv')
all_results['latt'] = simbad.util.result_by_score_from_csv(csv, 'final_r_free', ascending=True)
gui.display_results(False, args.results_to_display)
# =====================================================================================
# Perform the contaminant search
solution_found = simbad.command_line._simbad_contaminant_search(args)
logger.info("Contaminant search completed in %d days, %d hours, %d minutes, and %d seconds",
*stopwatch.lap.time_pretty)
if solution_found and not args.process_all:
logger.info(
"Check you out, crystallizing contaminants! But don't worry, SIMBAD figured it out and found a solution.")
continue
elif solution_found and args.process_all:
logger.info(
"SIMBAD thinks it has found a solution however process_all is set, continuing to morda search")
else:
logger.info(
"No results found - contaminant search was unsuccessful")
if args.output_pdb and args.output_mtz:
csv = os.path.join(args.work_dir, 'cont/cont_mr.csv')
all_results['cont'] = simbad.util.result_by_score_from_csv(csv, 'final_r_free', ascending=True)
gui.display_results(False, args.results_to_display)
# =====================================================================================
# Perform the morda search
solution_found = simbad.command_line._simbad_morda_search(args)
logger.info("Full MoRDa domain search completed in %d days, %d hours, %d minutes, and %d seconds",
*stopwatch.lap.time_pretty)
if solution_found:
logger.info("... and SIMBAD worked once again. Get in!")
continue
else:
logger.info("No results found - full search was unsuccessful")
if args.output_pdb and args.output_mtz:
csv = os.path.join(args.work_dir, 'morda/morda_mr.csv')
all_results['morda'] = simbad.util.result_by_score_from_csv(csv, 'final_r_free', ascending=True)
gui.display_results(False, args.results_to_display)
# =====================================================================================
# Make sure we only run the loop once for now
end_of_cycle = True
if len(all_results) >= 1:
if sys.version_info.major == 3:
sorted_results = sorted(all_results.items(), key=lambda kv: (kv[1], kv))
else:
sorted_results = sorted(all_results.iteritems(), key=lambda kv: (kv[1], kv))
result = sorted_results[0][1]
simbad.util.output_files(args.work_dir, result, args.output_pdb, args.output_mtz)
stopwatch.stop()
logger.info("All processing completed in %d days, %d hours, %d minutes, and %d seconds",
*stopwatch.time_pretty)
gui.display_results(True, args.results_to_display)
if args.rvapi_document:
gui.save_document()
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.NOTSET)
try:
main()
except Exception:
simbad.exit.exit_error(*sys.exc_info())
| bsd-3-clause | 6,700,612,757,377,132,000 | 39.70625 | 146 | 0.614617 | false | 3.560962 | false | false | false |
mdavidsaver/spicetools | spicetools/bench/fileframe.py | 1 | 2613 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2014 Michael Davidsaver
License is GPL3+, see file LICENSE for details
"""
import logging
_log=logging.getLogger(__name__)
import os, os.path
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
from .fileframe_ui import Ui_FileFrame
class FileFrame(QtGui.QFrame):
fileChanged = QtCore.pyqtSignal(QtCore.QString)
typeChanged = QtCore.pyqtSignal(bool)
def __init__(self, parent):
super(FileFrame, self).__init__(parent)
self.ui = Ui_FileFrame()
self.ui.setupUi(self)
        self.dia = QtGui.QFileDialog(self, "Select Net or Schem.",
os.getcwd(),
"Net/Schem. (*.net *.sch);;All (*)")
self.dia.fileSelected.connect(self.setFile)
self.ui.fileBox.activated.connect(self._fileChange)
self.ui.typeBox.currentIndexChanged.connect(self._typeChanged)
self.ui.fileBtn.clicked.connect(self._select_existing)
A = QtGui.QAction("&Create file", self.ui.fileBtn)
self.ui.fileBtn.addAction(A)
A.activated.connect(self._select_new)
A = QtGui.QAction("S&elect file", self.ui.fileBtn)
A.activated.connect(self._select_existing)
self.ui.fileBtn.addAction(A)
def _select_existing(self):
self.dia.setFileMode(self.dia.ExistingFile)
self.dia.setAcceptMode(self.dia.AcceptOpen)
self.dia.exec_()
def _select_new(self):
self.dia.setFileMode(self.dia.AnyFile)
self.dia.setAcceptMode(self.dia.AcceptSave)
R = self.dia.exec_()
if not R:
return
F = str(self.dia.selectedFiles()[0])
_log.info("Create %s", F)
with open(F, 'w') as F:
pass # create empty file
def clear(self):
self.setFile('')
self.setType(True)
def _fileChange(self):
self.fileChanged.emit(self.ui.fileBox.currentText())
def _typeChanged(self, i):
self.typeChanged.emit(i==1)
def setFile(self, fname):
self.dia.selectFile(fname)
self.ui.fileBox.setEditText(fname)
self.fileChanged.emit(fname)
def setType(self, B):
self.ui.typeBox.setCurrentIndex(1 if B else 0)
def file(self):
return self.ui.fileBox.currentText()
def type(self):
return self.ui.typeBox.currentIndex()==1
file = QtCore.pyqtProperty(QtCore.QString, file, setFile,
notify=fileChanged)
type = QtCore.pyqtProperty(bool, type, setType,
notify=typeChanged)
| gpl-3.0 | -8,681,363,298,104,596,000 | 28.359551 | 73 | 0.606965 | false | 3.589286 | false | false | false |
zathras777/pywind | pywind/ofgem/objects.py | 1 | 8218 | # coding=utf-8
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
from datetime import datetime
from pprint import pprint
from pywind.utils import map_xml_to_dict
class OfgemObjectBase(object):
XML_MAPPING = None
def __init__(self, node):
""" Extract information from the supplied XML node.
The factor figure is MWh per certificate.
"""
if self.XML_MAPPING is None:
raise NotImplementedError("Child classes should define their XML_MAPPING")
self.attrs = map_xml_to_dict(node, self.XML_MAPPING)
# pprint(self.attrs)
def __getattr__(self, item):
if item in self.attrs:
return self.attrs[item]
raise AttributeError(item)
def as_row(self):
"""
Return the information in correct format for :func:`rows()` usage
:returns: Formatted attribute dict
:rtype: dict
"""
return {'@{}'.format(key): self.attrs[key] for key in self.attrs.keys()}
class Certificates(OfgemObjectBase):
""" Certificate Number Fact Sheet
https://www.ofgem.gov.uk/sites/default/files/docs/roc_identifier_fact_sheet_dec_2015.pdf
"""
XML_MAPPING = (
('textbox4', 'generator_id'),
('textbox13', 'name'),
('textbox5', 'scheme'),
('textbox19', 'capacity', 'float', 0.0),
('textbox12', 'country'),
('textbox15', 'technology'),
('textbox31', 'generation_type'),
('textbox18', 'period'),
('textbox21', 'certs', 'int', 0),
('textbox24', 'start_no'),
('textbox27', 'finish_no'),
('textbox37', 'factor', 'float', 0.0),
('textbox30', 'issue_dt', 'date'),
('textbox33', 'status'),
('textbox36', 'status_dt', 'date'),
('textbox39', 'current_holder'),
('textbox45', 'reg_no')
)
def __init__(self, node):
OfgemObjectBase.__init__(self, node)
if self.attrs['period'].startswith("01"):
dt = datetime.strptime(self.attrs['period'][:10], '%d/%m/%Y')
self.attrs['period'] = dt.strftime("%b-%Y")
def __str__(self):
return " {} {} {:5d} {}".format(self.issue_dt.strftime("%Y %b %d"), self.start_no,
self.certs, self.current_holder)
@property
def digits(self):
""" Number of digits that store the certificate number.
:rtype: int
"""
return 10 if self.scheme == 'REGO' else 6
@property
def certificates(self):
""" Number of certificates covered by this object.
:rtype: int
"""
return self.finish - self.start + 1
@property
def start(self):
""" Return the numeric start number for the certificates.
Each certificate number contains the station, period and the number of the certificate,
so this function extracts the numeric part.
:returns: Start number of the certificates referenced
:rtype: int
"""
return int(self.start_no[10:10 + self.digits])
@property
def finish(self):
""" Return the numeric finish number for the certificates.
Each certificate number contains the station, period and the number of the certificate,
so this function extracts the numeric part.
:returns: Finish number of the certificates referenced
:rtype: integer
"""
return int(self.finish_no[10:10 + self.digits])
def output_summary(self):
""" Return a string with the output for the certificates.
:rtype: str
"""
        perc = (float(self.certs) / self.capacity) * 100
return "%s: %s %s vs %s => %.02f%%" % (self.period, self.name, self.certs,
self.capacity, perc)
def station_details(self):
""" Get a dict object with the station information for these certificates.
:returns: Dict with just information relevant to identifying the station
:rtype: dict
"""
rv_dict = {fld: self.attrs[fld] for fld in ['generator_id',
'name',
'scheme',
'capacity',
'country',
'technology']}
rv_dict['output'] = self.output
return rv_dict
@property
def output(self):
""" Calculate the output based on the number of certs issued and factor.
:returns: Numeric output or 0
:rtype: float
"""
return self.certs / self.factor
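# Added illustrative note (not part of the original module): certificate numbers
# are fixed-width strings whose numeric counter starts at character 10, which is
# what the `start`/`finish` properties above slice out. For a hypothetical
# non-REGO number beginning 'G01234NWSC000123...', `start` would evaluate to 123.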
class Station(OfgemObjectBase):
"""
Store details of a single station using data from Ofgem.
    The exposed object makes the individual pieces of data available as \
    attributes, i.e.
    .. :code::
       name = station.name
    The convenience function :func:`as_row` will return the data as a dict \
    formatted for tabular display.
"""
XML_MAPPING = (
('GeneratorID', 'generator_id'),
('StatusName', 'status'),
('GeneratorName', 'name'),
('SchemeName', 'scheme'),
('Capacity', '', 'float'),
('Country',),
('TechnologyName', 'technology'),
('OutputType', 'output'),
('AccreditationDate', 'accreditation_dt', 'date'),
('CommissionDate', 'commission_dt', 'date'),
('textbox6', 'developer'),
('textbox61', 'developer_address', 'address'),
('textbox65', 'address', 'address'),
('FaxNumber', 'fax')
)
def __init__(self, node):
OfgemObjectBase.__init__(self, node)
# catch/correct some odd results I have observed...
if self.attrs['technology'] is not None and '\n' in self.attrs['technology']:
self.attrs['technology'] = self.attrs['technology'].split('\n')[0]
class CertificateStation(object):
""" We are normally interested in knowing about certificates issued to
a station, so this class attempts to simplify this process.
Once issued all certificates will be accounted for, but the final
owner and status may change. This class attempts to take a bunch of
Certificate objects and simplify them into a final set, with ownership
and status correctly attributed.
"""
def __init__(self, name, g_id, capacity, scheme):
self.name = name
self.generator_id = g_id
self.scheme = scheme
self.capacity = capacity
self.certs = []
def __len__(self):
return len(self.certs)
def __iter__(self):
for c in self.certs:
yield c
def add_cert(self, cert):
self.certs.append(cert)
def as_row(self):
return [cert.as_row() for cert in self.certs]
| unlicense | 3,872,703,660,219,905,000 | 33.970213 | 100 | 0.586152 | false | 4.258031 | false | false | false |
robertnishihara/ray | python/ray/autoscaler/_private/aws/utils.py | 1 | 4590 | from collections import defaultdict
from ray.autoscaler._private.cli_logger import cli_logger
import colorful as cf
class LazyDefaultDict(defaultdict):
"""
LazyDefaultDict(default_factory[, ...]) --> dict with default factory
    The default factory is called with the key argument to produce
a new value when a key is not present, in __getitem__ only.
A LazyDefaultDict compares equal to a dict with the same items.
All remaining arguments are treated the same as if they were
passed to the dict constructor, including keyword arguments.
"""
def __missing__(self, key):
"""
__missing__(key) # Called by __getitem__ for missing key; pseudo-code:
if self.default_factory is None: raise KeyError((key,))
self[key] = value = self.default_factory(key)
return value
"""
self[key] = self.default_factory(key)
return self[key]
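# Added illustrative note (not part of the original module): unlike
# collections.defaultdict, the factory receives the missing key, so per-key
# defaults are possible, e.g.
#
#   tags = LazyDefaultDict(lambda key: {"Name": key})
#   tags["ray-head"]   # -> {"Name": "ray-head"}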
def handle_boto_error(exc, msg, *args, **kwargs):
if cli_logger.old_style:
# old-style logging doesn't do anything here
# so we exit early
return
error_code = None
error_info = None
# todo: not sure if these exceptions always have response
if hasattr(exc, "response"):
error_info = exc.response.get("Error", None)
if error_info is not None:
error_code = error_info.get("Code", None)
generic_message_args = [
"{}\n"
"Error code: {}",
msg.format(*args, **kwargs),
cf.bold(error_code)
]
# apparently
# ExpiredTokenException
# ExpiredToken
# RequestExpired
# are all the same pretty much
credentials_expiration_codes = [
"ExpiredTokenException", "ExpiredToken", "RequestExpired"
]
if error_code in credentials_expiration_codes:
# "An error occurred (ExpiredToken) when calling the
# GetInstanceProfile operation: The security token
# included in the request is expired"
# "An error occurred (RequestExpired) when calling the
# DescribeKeyPairs operation: Request has expired."
token_command = (
"aws sts get-session-token "
"--serial-number arn:aws:iam::" + cf.underlined("ROOT_ACCOUNT_ID")
+ ":mfa/" + cf.underlined("AWS_USERNAME") + " --token-code " +
cf.underlined("TWO_FACTOR_AUTH_CODE"))
secret_key_var = (
"export AWS_SECRET_ACCESS_KEY = " + cf.underlined("REPLACE_ME") +
" # found at Credentials.SecretAccessKey")
session_token_var = (
"export AWS_SESSION_TOKEN = " + cf.underlined("REPLACE_ME") +
" # found at Credentials.SessionToken")
access_key_id_var = (
"export AWS_ACCESS_KEY_ID = " + cf.underlined("REPLACE_ME") +
" # found at Credentials.AccessKeyId")
# fixme: replace with a Github URL that points
# to our repo
aws_session_script_url = ("https://gist.github.com/maximsmol/"
"a0284e1d97b25d417bd9ae02e5f450cf")
cli_logger.verbose_error(*generic_message_args)
cli_logger.verbose(vars(exc))
cli_logger.panic("Your AWS session has expired.")
cli_logger.newline()
cli_logger.panic("You can request a new one using")
cli_logger.panic(cf.bold(token_command))
cli_logger.panic("then expose it to Ray by setting")
cli_logger.panic(cf.bold(secret_key_var))
cli_logger.panic(cf.bold(session_token_var))
cli_logger.panic(cf.bold(access_key_id_var))
cli_logger.newline()
cli_logger.panic("You can find a script that automates this at:")
cli_logger.panic(cf.underlined(aws_session_script_url))
# Do not re-raise the exception here because it looks awful
# and we already print all the info in verbose
cli_logger.abort()
# todo: any other errors that we should catch separately?
cli_logger.panic(*generic_message_args)
cli_logger.newline()
with cli_logger.verbatim_error_ctx("Boto3 error:"):
cli_logger.verbose("{}", str(vars(exc)))
cli_logger.panic("{}", str(exc))
cli_logger.abort()
def boto_exception_handler(msg, *args, **kwargs):
# todo: implement timer
class ExceptionHandlerContextManager():
def __enter__(self):
pass
def __exit__(self, type, value, tb):
import botocore
if type is botocore.exceptions.ClientError:
handle_boto_error(value, msg, *args, **kwargs)
return ExceptionHandlerContextManager()
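# Added illustrative sketch (not part of the original module): wrap boto3 calls
# so that ClientError exceptions are reported with the friendlier explanation
# from handle_boto_error above. `ec2` and `region` are assumed to exist in the
# caller's scope.
#
#   with boto_exception_handler("Failed to describe instances in {}", region):
#       ec2.describe_instances()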
| apache-2.0 | 369,375,581,428,346,560 | 34.859375 | 78 | 0.621133 | false | 3.919727 | false | false | false |
zaneveld/picrust | picrust/util.py | 1 | 16630 | #!/usr/bin/env python
# File created on 23 Nov 2011
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2015, The PICRUSt Project"
__credits__ = ["Greg Caporaso", "Morgan Langille", "Daniel McDonald"]
__license__ = "GPL"
__version__ = "1.1.0"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
__status__ = "Development"
from json import dumps
from os.path import abspath, dirname, isdir
from os import makedirs
from cogent.core.tree import PhyloNode, TreeError
from numpy import array, asarray, atleast_1d
from biom import Table, parse_table
from biom.table import vlen_list_of_str_formatter
from biom.util import biom_open, HAVE_H5PY
from subprocess import Popen, PIPE
import StringIO
def make_sample_transformer(scaling_factors):
def transform_sample(sample_value,sample_id,sample_metadata):
scaling_factor = scaling_factors[sample_id]
new_val = sample_value * scaling_factor
return new_val
return transform_sample
def scale_metagenomes(metagenome_table,scaling_factors):
""" scale metagenomes from metagenome table and scaling factors
"""
transform_sample_f = make_sample_transformer(scaling_factors)
new_metagenome_table = metagenome_table.transform(transform_sample_f)
return new_metagenome_table
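# Added illustrative sketch (not part of the original module; the table contents
# and scaling factors are made up, and the biom Table constructor call is an
# assumption about that library's API): scaling_factors maps each sample id in
# the metagenome table to the multiplier applied to that sample's counts.
#
#   from numpy import array
#   from biom import Table
#   table = Table(array([[1.0, 2.0], [3.0, 4.0]]), ['K00001', 'K00002'], ['S1', 'S2'])
#   scaled = scale_metagenomes(table, {'S1': 2.0, 'S2': 0.5})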
def convert_precalc_to_biom(precalc_in, ids_to_load=None,transpose=True,md_prefix='metadata_'):
"""Loads PICRUSTs tab-delimited version of the precalc file and outputs a BIOM object"""
#if given a string convert to a filehandle
if type(precalc_in) ==str or type(precalc_in) == unicode:
fh = StringIO.StringIO(precalc_in)
else:
fh=precalc_in
#first line has to be header
header_ids=fh.readline().strip().split('\t')
col_meta_locs={}
for idx,col_id in enumerate(header_ids):
if col_id.startswith(md_prefix):
col_meta_locs[col_id[len(md_prefix):]]=idx
end_of_data=len(header_ids)-len(col_meta_locs)
trait_ids = header_ids[1:end_of_data]
col_meta=[]
row_meta=[{} for i in trait_ids]
if ids_to_load is not None and len(ids_to_load) > 0:
ids_to_load=set(ids_to_load)
load_all_ids=False
else:
load_all_ids=True
matching=[]
otu_ids=[]
for line in fh:
fields = line.strip().split('\t')
row_id=fields[0]
if(row_id.startswith(md_prefix)):
#handle metadata
#determine type of metadata (this may not be perfect)
metadata_type=determine_metadata_type(line)
for idx,trait_name in enumerate(trait_ids):
row_meta[idx][row_id[len(md_prefix):]]=parse_metadata_field(fields[idx+1],metadata_type)
elif load_all_ids or (row_id in set(ids_to_load)):
otu_ids.append(row_id)
matching.append(map(float,fields[1:end_of_data]))
#add metadata
col_meta_dict={}
for meta_name in col_meta_locs:
col_meta_dict[meta_name]=fields[col_meta_locs[meta_name]]
col_meta.append(col_meta_dict)
if not load_all_ids:
ids_to_load.remove(row_id)
if not otu_ids:
raise ValueError,"No OTUs match identifiers in precalculated file. PICRUSt requires an OTU table reference/closed picked against GreenGenes.\nExample of the first 5 OTU ids from your table: {0}".format(', '.join(list(ids_to_load)[:5]))
if ids_to_load:
raise ValueError,"One or more OTU ids were not found in the precalculated file!\nAre you using the correct --gg_version?\nExample of (the {0}) unknown OTU ids: {1}".format(len(ids_to_load),', '.join(list(ids_to_load)[:5]))
#note that we transpose the data before making biom obj
matching = asarray(matching)
if transpose:
return Table(matching.T, trait_ids, otu_ids, row_meta, col_meta,
type='Gene table')
else:
return Table(matching, otu_ids, trait_ids, col_meta, row_meta,
type='Gene table')
def convert_biom_to_precalc(biom_table):
"""Converts a biom table into a PICRUSt precalculated tab-delimited file """
col_ids = biom_table.ids(axis='observation')
row_ids = biom_table.ids()
lines = []
header = ['#OTU_IDs'] + list(col_ids)
col_metadata_names = []
    # peek at metadata for Samples (e.g. NSTI) so we can set the header
if biom_table.metadata():
col_metadata_names = biom_table.metadata()[0].keys()
#add the metadata names to the header
for col_metadata_name in col_metadata_names:
header.append('metadata_' + col_metadata_name)
lines.append(map(str, header))
row_metadata_names = []
    # peek at metadata for observations (e.g. KEGG_Pathways)
if biom_table.metadata(axis='observation'):
row_metadata_names = biom_table.metadata(axis='observation')[0].keys()
for metadata_name in row_metadata_names:
metadata_line = ['metadata_' + metadata_name]
# do the observation metadata now
for col_id in col_ids:
metadata = biom_table.metadata(axis='observation')[biom_table.index(col_id, axis='observation')]
metadata_line.append(biom_meta_to_string(metadata[metadata_name]))
lines.append(map(str, metadata_line))
# transpose the actual count data
transposed_table = biom_table._data.T
for idx, count in enumerate(transposed_table.toarray()):
line = [row_ids[idx]] + map(str, count)
# add the metadata values to the end of the row now
for meta_name in col_metadata_names:
line.append(biom_table.metadata()[idx][meta_name])
lines.append(line)
return "\n".join("\t".join(map(str, x)) for x in lines)
def determine_metadata_type(line):
if ';' in line:
if '|' in line:
return 'list_of_lists'
else:
return 'list'
else:
return 'string'
def parse_metadata_field(metadata_str,metadata_format='string'):
if metadata_format == 'string':
return metadata_str
elif metadata_format == 'list':
return [e.strip() for e in metadata_str.split(';')]
elif metadata_format == 'list_of_lists':
return [[e.strip() for e in y.split(';')] for y in metadata_str.split('|')]
def biom_meta_to_string(metadata):
""" Determine which format the metadata is and then convert to a string"""
    #Note that since ';' and '|' are used as separators we must replace them if they exist
if type(metadata) ==str or type(metadata)==unicode:
return metadata.replace(';',':')
elif type(metadata) == list:
if type(metadata[0]) == list:
return "|".join(";".join([y.replace(';',':').replace('|',':') for y in x]) for x in metadata)
else:
return ";".join(x.replace(';',':') for x in metadata)
def system_call(cmd, shell=True):
"""Call cmd and return (stdout, stderr, return_value).
cmd can be either a string containing the command to be run, or a sequence
of strings that are the tokens of the command.
Please see Python's subprocess.Popen for a description of the shell
parameter and how cmd is interpreted differently based on its value.
This code was copied from QIIME's qiime_system_call() (util.py) function on June 3rd, 2013.
"""
proc = Popen(cmd, shell=shell, universal_newlines=True, stdout=PIPE,
stderr=PIPE)
# communicate pulls all stdout/stderr from the PIPEs to
# avoid blocking -- don't remove this line!
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
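# Added illustrative note (not part of the original module): system_call returns
# the captured output streams together with the exit status, e.g.
#
#   stdout, stderr, return_value = system_call('echo hello')
#   # stdout == 'hello\n', stderr == '', return_value == 0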
def file_contains_nulls(file):
"""Checks given file for null characters. These are sometimes created on SGE clusters when system IO is overloaded."""
return '\x00' in open(file,'rb').read()
def parse_table_to_biom(table_lines, table_format="tab-delimited",\
biom_format = 'otu table'):
"""Read the lines of an open trait table file, and output a .biom table object
The trait table must be either a biom file, or a picrust tab-delimited file
table_format -- must be either 'tab-delimited' or 'biom'
"""
return parse_table(table_lines)
def get_picrust_project_dir():
""" Returns the top-level PICRUST directory
"""
# Get the full path of util.py
current_file_path = abspath(__file__)
# Get the directory containing util.py
current_dir_path = dirname(current_file_path)
# Return the directory containing the directory containing util.py
return dirname(current_dir_path)
def transpose_trait_table_fields(data_fields,header,id_row_idx=0,\
input_header_delimiter="\t",output_delimiter="\t"):
"""Transpose the fields of a trait table, returning new data_fields,header
data_fields: list of lists for data fields
header: a string describing the header_line
id_row_idx: index of row labels. Almost always 0 but included for
but included for completeness
input_header_delimiter: delimiter for fields in the header string
output_delimiter: use this delimiter to join header fields
NOTE: typically the header and data fields are generated
by parse_trait_table in picrust.parse
"""
header_fields = header.split(input_header_delimiter)
# ensure no trailing newlines
old_header_fields = [h.strip() for h in header_fields]
new_header_fields = [old_header_fields[0]] + \
[df[id_row_idx].strip() for df in data_fields]
non_label_data_fields = []
for row in data_fields:
non_label_fields = [e for i, e in enumerate(row) if i != id_row_idx]
non_label_data_fields.append(non_label_fields)
data_array = array(non_label_data_fields)
new_data_array = data_array.T
new_rows = []
for i,row in enumerate(new_data_array):
label = old_header_fields[i+1]
# this is i+1 not i because i is the blank/meaningless
# upper left corner entry.
new_row = [label] + list(row)
new_rows.append(new_row)
new_header = output_delimiter.join(new_header_fields)
return new_header + "\n", new_rows
def make_output_dir_for_file(filepath):
"""Create sub-directories for a new file if they don't already exist"""
dirpath = dirname(filepath)
if not isdir(dirpath) and not dirpath == '':
makedirs(dirpath)
def write_biom_table(biom_table, biom_table_fp, compress=True,
write_hdf5=HAVE_H5PY, format_fs=None):
"""Writes a BIOM table to the specified filepath
Parameters
----------
biom_table : biom.Table
The table object to write out
biom_table_fp : str
The path to the output file
compress : bool, optional
Defaults to ``True``. If True, built-in compression on the output HDF5
file will be enabled. This option is only relevant if ``write_hdf5`` is
``True``.
write_hdf5 : bool, optional
Defaults to ``True`` if H5PY is installed and to ``False`` if H5PY is
not installed. If ``True`` the output biom table will be written as an
HDF5 binary file, otherwise it will be a JSON string.
format_fs : dict, optional
Formatting functions to be passed to `Table.to_hdf5`
Notes
-----
This code was adapted from QIIME 1.9
"""
generated_by = "PICRUSt " + __version__
if write_hdf5:
with biom_open(biom_table_fp, 'w') as biom_file:
biom_table.to_hdf5(biom_file, generated_by, compress,
format_fs=format_fs)
else:
with open(biom_table_fp, 'w') as biom_file:
biom_table.to_json(generated_by, biom_file)
def make_output_dir(dirpath, strict=False):
"""Make an output directory if it doesn't exist
Returns the path to the directory
dirpath -- a string describing the path to the directory
strict -- if True, raise an exception if dir already
exists
"""
dirpath = abspath(dirpath)
#Check if directory already exists
if isdir(dirpath):
if strict == True:
err_str = "Directory '%s' already exists" % dirpath
raise IOError(err_str)
return dirpath
try:
makedirs(dirpath)
except IOError,e:
err_str = "Could not create directory '%s'. Are permissions set correctly? Got error: '%s'" %e
raise IOError(err_str)
return dirpath
class PicrustNode(PhyloNode):
def multifurcating(self, num, eps=None, constructor=None):
"""Return a new tree with every node having num or few children
num : the number of children a node can have max
eps : default branch length to set if self or constructor is of
PhyloNode type
constructor : a TreeNode or subclass constructor. If None, uses self
"""
if num < 2:
raise TreeError, "Minimum number of children must be >= 2"
if eps is None:
eps = 0.0
if constructor is None:
constructor = self.__class__
if hasattr(constructor, 'Length'):
set_branchlength = True
else:
set_branchlength = False
new_tree = self.copy()
for n in new_tree.preorder(include_self=True):
while len(n.Children) > num:
new_node = constructor(Children=n.Children[-num:])
if set_branchlength:
new_node.Length = eps
n.append(new_node)
return new_tree
def bifurcating(self, eps=None, constructor=None):
"""Wrap multifurcating with a num of 2"""
return self.multifurcating(2, eps, constructor)
def nameUnnamedNodes(self):
"""sets the Data property of unnamed nodes to an arbitrary value
Internal nodes are often unnamed and so this function assigns a
value for referencing.
        Note*: This method is faster than pycogent nameUnnamedNodes()
because it uses a dict instead of an array. Also, we traverse
only over internal nodes (and not including tips)
"""
#make a list of the names that are already in the tree
names_in_use = {}
for node in self.iterNontips(include_self=True):
if node.Name:
names_in_use[node.Name]=1
#assign unique names to the Data property of nodes where Data = None
name_index = 1
for node in self.iterNontips(include_self=True):
#if (not node.Name) or re.match('edge',node.Name):
if not node.Name:
new_name = 'node' + str(name_index)
#choose a new name if name is already in tree
while new_name in names_in_use:
name_index += 1
new_name = 'node' + str(name_index)
node.Name = new_name
names_in_use[node.Name]=1
name_index += 1
def getSubTree(self,names):
"""return a new subtree with just the tips in names
assumes names is a set
assumes all names in names are present as tips in tree
"""
tcopy = self.deepcopy()
while len(tcopy.tips()) != len(names):
# for each tip, remove it if we do not want to keep it
for n in tcopy.tips():
if n.Name not in names:
n.Parent.removeNode(n)
# reduce single-child nodes
tcopy.prune()
return tcopy
def list_of_list_of_str_formatter(grp, header, md, compression):
"""Serialize [[str]] into a BIOM hdf5 compatible form
Parameters
----------
grp : h5py.Group
This is ignored. Provided for passthrough
header : str
The key in each dict to pull out
md : list of dict
The axis metadata
compression : bool
Whether to enable dataset compression. This is ignored, provided for
passthrough
Returns
-------
grp : h5py.Group
The h5py.Group
header : str
The key in each dict to pull out
md : list of dict
The modified metadata that can be formatted in hdf5
compression : bool
Whether to enable dataset compression.
Notes
-----
This method is intended to be a "passthrough" to BIOM's
vlen_list_of_str_formatter method. It is a transform method.
"""
new_md = [{header: atleast_1d(asarray(dumps(m[header])))} for m in md]
return (grp, header, new_md, compression)
def picrust_formatter(*args):
"""Transform, and format"""
return vlen_list_of_str_formatter(*list_of_list_of_str_formatter(*args))
| gpl-3.0 | 4,356,464,400,882,689,000 | 33.936975 | 243 | 0.632111 | false | 3.734561 | false | false | false |
certik/chess | common/appenginepatch/appenginepatcher/patch.py | 1 | 9983 | # -*- coding: utf-8 -*-
from google.appengine.ext import db
import logging, os, sys
def patch_all():
patch_python()
patch_app_engine()
patch_django()
setup_logging()
def patch_python():
# Remove modules that we want to override
for module in ('httplib', 'urllib', 'urllib2', 'memcache',):
if module in sys.modules:
del sys.modules[module]
# For some reason the imp module can't be replaced via sys.path
from appenginepatcher import have_appserver
if have_appserver:
from appenginepatcher import imp
sys.modules['imp'] = imp
# Add fake error and gaierror to socket module. Required for boto support.
import socket
class error(Exception):
pass
class gaierror(Exception):
pass
socket.error = error
socket.gaierror = gaierror
if have_appserver:
def unlink(_):
raise NotImplementedError('App Engine does not support FS writes!')
os.unlink = unlink
def patch_app_engine():
# This allows for using Paginator on a Query object. We limit the number
# of results to 301, so there won't be any timeouts (301 because you can
# say "more than 300 results").
def __len__(self):
return self.count(301)
db.Query.__len__ = __len__
# Add "model" property to Query (needed by generic views)
class ModelProperty(object):
def __get__(self, query, unused):
try:
return query._Query__model_class
except:
return query._model_class
db.Query.model = ModelProperty()
# Add a few Model methods that are needed for serialization
def _get_pk_val(self):
return unicode(self.key())
db.Model._get_pk_val = _get_pk_val
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self._get_pk_val() == other._get_pk_val()
db.Model.__eq__ = __eq__
def __ne__(self, other):
return not self.__eq__(other)
db.Model.__ne__ = __ne__
# Make Property more Django-like (needed for serialization)
db.Property.serialize = True
db.Property.rel = None
class Relation(object):
field_name = 'key_name'
db.ReferenceProperty.rel = Relation
# Add repr to make debugging a little bit easier
def __repr__(self):
d = dict([(k, getattr(self, k)) for k in self.properties()])
return '%s(**%s)' % (self.__class__.__name__, repr(d))
db.Model.__repr__ = __repr__
# Replace save() method with one that calls put(), so a monkey-patched
# put() will also work if someone uses save()
def save(self):
return self.put()
db.Model.save = save
# Add _meta to Model, so porting code becomes easier (generic views,
# xheaders, and serialization depend on it).
class _meta(object):
many_to_many = []
class pk:
name = 'key_name'
def __init__(self, model):
self.app_label = model.__module__.split('.')[-2]
self.object_name = model.__name__
self.module_name = self.object_name.lower()
self.verbose_name = self.object_name.lower()
self.verbose_name_plural = None
self.abstract = False
self.model = model
def __str__(self):
return '%s.%s' % (self.app_label, self.module_name)
@property
def local_fields(self):
return self.model.properties().values()
# Register models with Django
old_init = db.PropertiedClass.__init__
def __init__(cls, name, bases, attrs):
"""Creates a combined appengine and Django model.
The resulting model will be known to both the appengine libraries and
Django.
"""
cls._meta = _meta(cls)
cls._default_manager = cls
old_init(cls, name, bases, attrs)
from django.db.models.loading import register_models
register_models(cls._meta.app_label, cls)
db.PropertiedClass.__init__ = __init__
def log_exception(*args, **kwargs):
logging.exception('Exception in request:')
def patch_django():
# In order speed things up and consume less memory we lazily replace
# modules if possible. This requires some __path__ magic. :)
# Add fake 'appengine' DB backend
# This also creates a separate datastore for each project.
from appenginepatcher.db_backends import appengine
sys.modules['django.db.backends.appengine'] = appengine
base_path = os.path.abspath(os.path.dirname(__file__))
# Replace generic views
from django.views import generic
generic.__path__.insert(0, os.path.join(base_path, 'generic_views'))
# Replace db session backend and tests
from django.contrib import sessions
sessions.__path__.insert(0, os.path.join(base_path, 'sessions'))
from django.contrib.sessions import backends
backends.__path__.insert(0, os.path.join(base_path, 'session_backends'))
# Replace the dispatchers.
from django.core import signals
# Log errors.
signals.got_request_exception.connect(log_exception)
# Unregister the rollback event handler.
import django.db
signals.got_request_exception.disconnect(django.db._rollback_on_exception)
# Replace auth models
# This MUST happen before any other modules import User or they'll
# get Django's original User model!!!
from appenginepatcher.auth import models
sys.modules['django.contrib.auth.models'] = models
# Replace rest of auth app
from django.contrib import auth
auth.__path__.insert(0, os.path.join(base_path, 'auth'))
# Replace ModelForm
# This MUST happen as early as possible, but after User got replaced!
from google.appengine.ext.db import djangoforms as aeforms
from django import forms
from django.forms import models as modelforms
forms.ModelForm = modelforms.ModelForm = aeforms.ModelForm
forms.ModelFormMetaclass = aeforms.ModelFormMetaclass
modelforms.ModelFormMetaclass = aeforms.ModelFormMetaclass
# Fix handling of verbose_name. Google resolves lazy translation objects
# immedately which of course breaks translation support.
from django.utils.text import capfirst
def get_form_field(self, form_class=forms.CharField, **kwargs):
defaults = {'required': self.required}
if self.verbose_name:
defaults['label'] = capfirst(self.verbose_name)
if self.choices:
choices = []
if not self.required or (self.default is None and
'initial' not in kwargs):
choices.append(('', '---------'))
for choice in self.choices:
choices.append((str(choice), unicode(choice)))
defaults['widget'] = forms.Select(choices=choices)
if self.default is not None:
defaults['initial'] = self.default
defaults.update(kwargs)
return form_class(**defaults)
db.Property.get_form_field = get_form_field
# Extend ModelForm with support for EmailProperty
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for an email property."""
defaults = {'form_class': forms.EmailField}
defaults.update(kwargs)
return super(db.EmailProperty, self).get_form_field(**defaults)
db.EmailProperty.get_form_field = get_form_field
# Fix default value of UserProperty (Google resolves the user too early)
def get_form_field(self, **kwargs):
from django.contrib.auth.models import User
from django.utils.functional import lazy
from google.appengine.api import users
defaults = {'initial': lazy(users.GetCurrentUser, User)}
defaults.update(kwargs)
return super(db.UserProperty, self).get_form_field(**defaults)
db.UserProperty.get_form_field = get_form_field
# Replace mail backend
from appenginepatcher import mail as gmail
from django.core import mail
mail.SMTPConnection = gmail.GoogleSMTPConnection
mail.mail_admins = gmail.mail_admins
mail.mail_managers = gmail.mail_managers
# Fix translation support if we're in a zip file. We change the path
# of the django.conf module, so the translation code tries to load
# Django's translations from the common/django-locale/locale folder.
from django import conf
from aecmd import COMMON_DIR
if '.zip' + os.sep in conf.__file__:
conf.__file__ = os.path.join(COMMON_DIR, 'django-locale', 'fake.py')
# Patch login_required if using Google Accounts
from django.conf import settings
if 'ragendja.auth.middleware.GoogleAuthenticationMiddleware' in \
settings.MIDDLEWARE_CLASSES:
from ragendja.auth.decorators import google_login_required, \
redirect_to_google_login
from django.contrib.auth import decorators, views
decorators.login_required = google_login_required
views.redirect_to_login = redirect_to_google_login
# Activate ragendja's GLOBALTAGS support (automatically done on import)
from ragendja import template
# Patch auth forms
from appenginepatcher import auth_forms_patch
# Add XML serializer
if not hasattr(settings, 'SERIALIZATION_MODULES'):
settings.SERIALIZATION_MODULES = {}
for name in ('xml', 'python', 'json', 'yaml'):
settings.SERIALIZATION_MODULES[name] = 'appenginepatcher.serializers.' \
+ name
# Patch DeserializedObject
from django.core.serializers import base
class DeserializedObject(base.DeserializedObject):
def save(self, save_m2m=True):
self.object.save()
self.object._parent = None
base.DeserializedObject = DeserializedObject
def setup_logging():
from django.conf import settings
if settings.DEBUG:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
| mit | 7,800,223,992,368,336,000 | 36.389513 | 80 | 0.650005 | false | 4.1218 | false | false | false |
ronin13/pyvolume | pyvolume/sshfs.py | 1 | 4118 | # -*- coding: utf-8 -*-
""" Module providing SSHFileSystem implementation."""
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import os
import os.path
from plumbum import ProcessExecutionError
from plumbum.cmd import sshfs
from plumbum.cmd import sudo
from plumbum.cmd import umount
from pyvolume.exceptions import NeedOptionsException
log = logging.getLogger(__name__)
class SSHFileSystem(object):
"""
Mounts an external directory pointed by `remote_path`
onto `base` (/mnt by default) and passes it to Docker
to use as a volume. Uses vol_dict to keep track of
different volumes.
"""
def __init__(self, base):
self.base = base
self.sshfs_options = [
"-o",
"reconnect,cache_timeout=60,allow_other,uid=1000,gid=1000,intr",
]
self.vol_dict = {}
def create(self, volname, options):
""" Creates the directories but does not mount it yet."""
if "remote_path" not in options:
raise NeedOptionsException("remote_path is a required option for sshfs")
remote_path = options["remote_path"]
local_path = os.path.join(self.base, volname)
log.info("Creating directory " + local_path)
os.mkdir(local_path)
cmdline = []
if "ssh_config" in options:
cmdline += ["-F", options["ssh_config"]]
if "sshfs_options" in options:
sshfs_options = [options["sshfs_options"]]
else:
sshfs_options = self.sshfs_options
cmdline += [remote_path]
cmdline += [local_path]
cmdline += sshfs_options
self.vol_dict[volname] = {
"Local": local_path,
"Remote": remote_path,
"cmdline": cmdline,
"mounted": False,
}
def list(self):
""" Lists the existing volumes being managed."""
vol_list = []
for volumes in self.vol_dict:
vol_list += [volumes]
return vol_list
def mount_check(self, volname):
"""Check if the volume is already mounted.
If mounted, return its path.
"""
if not self.vol_dict[volname]["mounted"]:
log.error("Volume {0} is not mounted".format(volname))
return None
return self.vol_dict[volname]["Local"]
def path(self, volname):
"""Check if the volume is already mounted.
If mounted, return its path.
"""
if not self.mount_check(volname):
return None
return self.vol_dict[volname]["Local"]
def remove(self, volname):
"""
Removes the volume.
It unmounts the remote if necessary, tolerates
if already unmounted.
After which, it removes the mounted directory.
"""
local_path = self.vol_dict[volname]["Local"]
try:
self.umount(volname)
except ProcessExecutionError as e:
if e.retcode != 1:
raise
log.info("Removing local path " + local_path)
if os.path.exists(local_path):
os.rmdir(local_path)
return True
def mount(self, volname):
""" Mount the remote onto local for volname. """
check = self.mount_check(volname)
if check:
return check
cmdline = self.vol_dict[volname]["cmdline"]
mount_cmd = sshfs[cmdline]
mount_cmd()
self.vol_dict[volname]["mounted"] = True
return self.vol_dict[volname]["Local"]
def umount(self, volname):
if not self.mount_check(volname):
return None
local_path = self.vol_dict[volname]["Local"]
umount_cmd = sudo[umount[local_path]]
umount_cmd()
self.vol_dict[volname]["mounted"] = False
return True
def cleanup(self):
""" Unmounts and removes mount paths when shutting down."""
for volume in self.vol_dict:
self.remove(volume)
def scope(self):
""" Returns scope of this - global."""
return "global"
| mit | -1,407,974,807,998,446,800 | 29.279412 | 84 | 0.578679 | false | 4.089374 | false | false | false |
PhilLidar-DAD/geonode | geonode/eula/models.py | 1 | 2090 | from django.db import models
from geonode.layers.models import Layer
from geonode.documents.models import Document
from datetime import datetime
from django.utils.translation import ugettext_lazy as _
from geonode.base.models import ResourceBase
from geonode.people.models import OrganizationType
from django_enumfield import enum
try:
from django.conf import settings
User = settings.AUTH_USER_MODEL
except ImportError:
from django.contrib.auth.models import User
from geonode.datarequests.models import LipadOrgType
# Create your models here.
class EULALayerDownload(models.Model):
date_time = models.DateTimeField(default=datetime.now)
user = models.ForeignKey(User, null=False, blank=False)
layer = models.ForeignKey(Layer, null=False, blank=False)
def __unicode__(self):
return "{0}:{1}".format(self.user.username, self.layer.title)
class AnonDownloader(models.Model):
ORG_TYPE_CHOICES = LipadOrgType.objects.values_list('val', 'display_val')
date = models.DateTimeField(auto_now=True)
anon_first_name = models.CharField(_('First Name'), max_length=100)
anon_last_name = models.CharField(_('Last Name'), max_length=100)
anon_email = models.EmailField(_('Email'), max_length=50)
anon_organization = models.CharField(_('Organization'), max_length=100)
anon_purpose = models.CharField(_('Purpose'), max_length=100)
anon_layer = models.CharField(_('Layer Name'), max_length=100, null=True, blank=True,)
anon_orgtype = models.CharField(
_('Organization Type'),
max_length=100,
choices=ORG_TYPE_CHOICES,
default="Other",
help_text='Organization type based on Phil-LiDAR1 Data Distribution Policy'
)
anon_orgother = models.CharField(
_('If Other, please specify'),
max_length=255,
blank=True,
null=True,
)
# anon_resourcebase = models.ForeignKey(ResourceBase, null=True, blank=True, related_name='anon_resourcebase')
anon_document = models.CharField(_('Document Name'), max_length=100, null=True, blank=True,)
| gpl-3.0 | 5,668,980,210,110,807,000 | 42.541667 | 114 | 0.710526 | false | 3.813869 | false | false | false |
Griger/Intel-CervicalCancer-KaggleCompetition | featureHOG.py | 1 | 1456 | from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import numpy as np
from math import pi
from keras.preprocessing.image import ImageDataGenerator
import cv2
from sklearn.cluster import KMeans
import sklearn.preprocessing as prepro
# Generamos nuevos ejemplos
'''
datagen = ImageDataGenerator(
rotation_range=180,
shear_range=pi,
fill_mode='nearest')
train_data = np.load('Datos/train244all.npy')
train_labels = np.load('Datos/train_target244all.npy')
datagen.fit(train_data,rounds=2)
i = 0
nuevas_imagenes = []
tam = 1
for batch in datagen.flow(train_data,train_labels,batch_size = (len(train_data))):
i += 1
if i > tam:
break
nuevas_imagenes.append(batch[0])
nuevas_imagenes = np.array(nuevas_imagenes)
nuevas_imagenes = np.reshape(nuevas_imagenes, (len(train_data)*tam,244,244,3))
np.save('Datos/extraRotations.npy', nuevas_imagenes, allow_pickle=True, fix_imports=True)
'''
train_data = np.load('Datos/train244all.npy')
test_data = np.load('Datos/test244.npy')
hog = cv2.HOGDescriptor()
def getHist(image):
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
image = image * 255
image = image.astype('uint8')
return hog.compute(image)
histograms = [getHist(img) for img in train_data]
if __name__ == '__main__':
# Guardar los histogramas
| gpl-3.0 | 4,660,950,177,554,977,000 | 21.483871 | 94 | 0.665522 | false | 2.95935 | false | false | false |
m3z/HT | openstack_dashboard/api/swift.py | 1 | 9568 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import swiftclient
from django.conf import settings
from django.utils.translation import ugettext as _
from horizon import exceptions
from openstack_dashboard.api.base import url_for, APIDictWrapper
LOG = logging.getLogger(__name__)
FOLDER_DELIMITER = "/"
class Container(APIDictWrapper):
pass
class StorageObject(APIDictWrapper):
def __init__(self, apidict, container_name, orig_name=None, data=None):
super(StorageObject, self).__init__(apidict)
self.container_name = container_name
self.orig_name = orig_name
self.data = data
class PseudoFolder(APIDictWrapper):
"""
Wrapper to smooth out discrepencies between swift "subdir" items
and swift pseudo-folder objects.
"""
def __init__(self, apidict, container_name):
super(PseudoFolder, self).__init__(apidict)
self.container_name = container_name
def _has_content_type(self):
content_type = self._apidict.get("content_type", None)
return content_type == "application/directory"
@property
def name(self):
if self._has_content_type():
return self._apidict['name']
return self.subdir.rstrip(FOLDER_DELIMITER)
@property
def bytes(self):
if self._has_content_type():
return self._apidict['bytes']
return None
@property
def content_type(self):
return "application/directory"
def _objectify(items, container_name):
""" Splits a listing of objects into their appropriate wrapper classes. """
objects = {}
subdir_markers = []
# Deal with objects and object pseudo-folders first, save subdirs for later
for item in items:
if item.get("content_type", None) == "application/directory":
objects[item['name']] = PseudoFolder(item, container_name)
elif item.get("subdir", None) is not None:
subdir_markers.append(PseudoFolder(item, container_name))
else:
objects[item['name']] = StorageObject(item, container_name)
# Revisit subdirs to see if we have any non-duplicates
for item in subdir_markers:
if item.name not in objects.keys():
objects[item.name] = item
return objects.values()
def swift_api(request):
endpoint = url_for(request, 'object-store')
LOG.debug('Swift connection created using token "%s" and url "%s"'
% (request.user.token.id, endpoint))
return swiftclient.client.Connection(None,
request.user.username,
None,
preauthtoken=request.user.token.id,
preauthurl=endpoint,
auth_version="2.0")
def swift_container_exists(request, container_name):
try:
swift_api(request).head_container(container_name)
return True
except swiftclient.client.ClientException:
return False
def swift_object_exists(request, container_name, object_name):
try:
swift_api(request).head_object(container_name, object_name)
return True
except swiftclient.client.ClientException:
return False
def swift_get_containers(request, marker=None):
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
headers, containers = swift_api(request).get_account(limit=limit + 1,
marker=marker,
full_listing=True)
container_objs = [Container(c) for c in containers]
if(len(container_objs) > limit):
return (container_objs[0:-1], True)
else:
return (container_objs, False)
def swift_create_container(request, name):
if swift_container_exists(request, name):
raise exceptions.AlreadyExists(name, 'container')
swift_api(request).put_container(name)
return Container({'name': name})
def swift_delete_container(request, name):
swift_api(request).delete_container(name)
return True
def swift_get_objects(request, container_name, prefix=None, marker=None,
limit=None):
limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)
kwargs = dict(prefix=prefix,
marker=marker,
limit=limit + 1,
delimiter=FOLDER_DELIMITER,
full_listing=True)
headers, objects = swift_api(request).get_container(container_name,
**kwargs)
object_objs = _objectify(objects, container_name)
if(len(object_objs) > limit):
return (object_objs[0:-1], True)
else:
return (object_objs, False)
def swift_filter_objects(request, filter_string, container_name, prefix=None,
marker=None):
# FIXME(kewu): Swift currently has no real filtering API, thus the marker
# parameter here won't actually help the pagination. For now I am just
# getting the largest number of objects from a container and filtering
# based on those objects.
limit = 9999
objects = swift_get_objects(request,
container_name,
prefix=prefix,
marker=marker,
limit=limit)
filter_string_list = filter_string.lower().strip().split(' ')
def matches_filter(obj):
for q in filter_string_list:
return wildcard_search(obj.name.lower(), q)
return filter(matches_filter, objects[0])
def wildcard_search(string, q):
q_list = q.split('*')
if all(map(lambda x: x == '', q_list)):
return True
elif q_list[0] not in string:
return False
else:
if q_list[0] == '':
tail = string
else:
head, delimiter, tail = string.partition(q_list[0])
return wildcard_search(tail, '*'.join(q_list[1:]))
def swift_copy_object(request, orig_container_name, orig_object_name,
new_container_name, new_object_name):
try:
# FIXME(gabriel): The swift currently fails at unicode in the
# copy_to method, so to provide a better experience we check for
# unicode here and pre-empt with an error message rather than
# letting the call fail.
str(orig_container_name)
str(orig_object_name)
str(new_container_name)
str(new_object_name)
except UnicodeEncodeError:
raise exceptions.HorizonException(_("Unicode is not currently "
"supported for object copy."))
if swift_object_exists(request, new_container_name, new_object_name):
raise exceptions.AlreadyExists(new_object_name, 'object')
headers = {"X-Copy-From": FOLDER_DELIMITER.join([orig_container_name,
orig_object_name])}
return swift_api(request).put_object(new_container_name,
new_object_name,
None,
headers=headers)
def swift_create_subfolder(request, container_name, folder_name):
headers = {'content-type': 'application/directory',
'content-length': 0}
etag = swift_api(request).put_object(container_name,
folder_name,
None,
headers=headers)
obj_info = {'subdir': folder_name, 'etag': etag}
return PseudoFolder(obj_info, container_name)
def swift_upload_object(request, container_name, object_name, object_file):
headers = {}
headers['X-Object-Meta-Orig-Filename'] = object_file.name
etag = swift_api(request).put_object(container_name,
object_name,
object_file,
headers=headers)
obj_info = {'name': object_name, 'bytes': object_file.size, 'etag': etag}
return StorageObject(obj_info, container_name)
def swift_delete_object(request, container_name, object_name):
swift_api(request).delete_object(container_name, object_name)
return True
def swift_get_object(request, container_name, object_name):
headers, data = swift_api(request).get_object(container_name, object_name)
orig_name = headers.get("x-object-meta-orig-filename")
obj_info = {'name': object_name, 'bytes': len(data)}
return StorageObject(obj_info,
container_name,
orig_name=orig_name,
data=data)
| apache-2.0 | -1,708,836,969,548,699,400 | 35.10566 | 79 | 0.599289 | false | 4.248668 | false | false | false |
350dotorg/Django | django/core/mail/message.py | 1 | 10976 | import mimetypes
import os
import random
import time
from email import Charset, Encoders
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.Header import Header
from email.Utils import formatdate, getaddresses, formataddr
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import smart_str, force_unicode
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
Charset.add_charset('utf-8', Charset.SHORTEST, Charset.QP, 'utf-8')
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
class BadHeaderError(ValueError):
pass
# Copied from Python standard library, with the following modifications:
# * Used cached hostname for performance.
# * Added try/except to support lack of getpid() in Jython (#5496).
def make_msgid(idstring=None):
"""Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
<[email protected]>
Optional idstring if given is a string used to strengthen the
uniqueness of the message id.
"""
timeval = time.time()
utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
try:
pid = os.getpid()
except AttributeError:
# No getpid() in Jython, for example.
pid = 1
randint = random.randrange(100000)
if idstring is None:
idstring = ''
else:
idstring = '.' + idstring
idhost = DNS_NAME
msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost)
return msgid
def forbid_multi_line_headers(name, val, encoding):
"""Forbids multi-line headers, to prevent header injection."""
encoding = encoding or settings.DEFAULT_CHARSET
val = force_unicode(val)
if '\n' in val or '\r' in val:
raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
try:
val = val.encode('ascii')
except UnicodeEncodeError:
if name.lower() in ('to', 'from', 'cc'):
result = []
for nm, addr in getaddresses((val,)):
nm = str(Header(nm.encode(encoding), encoding))
try:
addr = addr.encode('ascii')
except UnicodeEncodeError: # IDN
addr = str(Header(addr.encode(encoding), encoding))
result.append(formataddr((nm, addr)))
val = ', '.join(result)
else:
val = Header(val.encode(encoding), encoding)
else:
if name.lower() == 'subject':
val = Header(val)
return name, val
class SafeMIMEText(MIMEText):
def __init__(self, text, subtype, charset):
self.encoding = charset
MIMEText.__init__(self, text, subtype, charset)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEText.__setitem__(self, name, val)
class SafeMIMEMultipart(MIMEMultipart):
def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
self.encoding = encoding
MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipart.__setitem__(self, name, val)
class EmailMessage(object):
"""
A container for email information.
"""
content_subtype = 'plain'
mixed_subtype = 'mixed'
encoding = None # None => use settings default
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings
(or UTF-8 bytestrings). The SafeMIMEText class will handle any
necessary encoding conversions.
"""
if to:
assert not isinstance(to, basestring), '"to" argument must be a list or tuple'
self.to = list(to)
else:
self.to = []
if bcc:
assert not isinstance(bcc, basestring), '"bcc" argument must be a list or tuple'
self.bcc = list(bcc)
else:
self.bcc = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body
self.attachments = attachments or []
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(smart_str(self.body, encoding),
self.content_subtype, encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = self.extra_headers.get('To', ', '.join(self.to))
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
msg['Date'] = formatdate()
if 'message-id' not in header_names:
msg['Message-ID'] = make_msgid()
for name, value in self.extra_headers.items():
if name.lower() in ('from', 'to'): # From and To are already handled
continue
msg[name] = value
return msg
def recipients(self):
"""
Returns a list of all recipients of the email (includes direct
addressees as well as Bcc entries).
"""
return self.to + self.bcc
def send(self, fail_silently=False):
"""Sends the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
"""
Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments.
"""
if isinstance(filename, MIMEBase):
assert content == mimetype == None
self.attachments.append(filename)
else:
assert content is not None
self.attachments.append((filename, content, mimetype))
def attach_file(self, path, mimetype=None):
"""Attaches a file from the filesystem."""
filename = os.path.basename(path)
content = open(path, 'rb').read()
self.attach(filename, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
"""
Converts the content, mimetype pair into a MIME attachment object.
"""
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
encoding = self.encoding or settings.DEFAULT_CHARSET
attachment = SafeMIMEText(smart_str(content, encoding), subtype, encoding)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
def _create_attachment(self, filename, content, mimetype=None):
"""
Converts the filename, content, mimetype triple into a MIME attachment
object.
"""
if mimetype is None:
mimetype, _ = mimetypes.guess_type(filename)
if mimetype is None:
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
attachment = self._create_mime_attachment(content, mimetype)
if filename:
attachment.add_header('Content-Disposition', 'attachment',
filename=filename)
return attachment
class EmailMultiAlternatives(EmailMessage):
"""
A version of EmailMessage that makes it easy to send multipart/alternative
messages. For example, including text and HTML versions of the text is
made easier.
"""
alternative_subtype = 'alternative'
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, alternatives=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings (or UTF-8
bytestrings). The SafeMIMEText class will handle any necessary encoding
conversions.
"""
super(EmailMultiAlternatives, self).__init__(subject, body, from_email, to, bcc, connection, attachments, headers)
self.alternatives=alternatives or []
def attach_alternative(self, content, mimetype):
"""Attach an alternative content representation."""
assert content is not None
assert mimetype is not None
self.alternatives.append((content, mimetype))
def _create_message(self, msg):
return self._create_attachments(self._create_alternatives(msg))
def _create_alternatives(self, msg):
encoding = self.encoding or settings.DEFAULT_CHARSET
if self.alternatives:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for alternative in self.alternatives:
msg.attach(self._create_mime_attachment(*alternative))
return msg
| bsd-3-clause | 5,907,717,041,566,670,000 | 36.979239 | 122 | 0.622449 | false | 4.31277 | false | false | false |
erichaase/topcoder-python | topcoder/knights_tour.py | 1 | 2012 | """
`KnightsTour <http://community.topcoder.com/stat?c=problem_statement&pm=10577>`__
"""
def solution (board):
b, n = Board(board), 1
while b.update(): n += 1
return n
class Board:
def __init__ (self, board):
self.board = [list(row) for row in board]
def update (self):
k, t = self.next_move()
if k and t:
self.board[k[0]][k[1]] = "*"
self.board[t[0]][t[1]] = "K"
return True
else:
return False
def next_move (self):
k = self.knight()
m = self.moves(k)
m.sort(key = lambda p: p[1])
m.sort(key = lambda p: p[0])
m.sort(key = lambda p: len(self.moves(p)))
t = None
if len(m) > 0:
t = m[0]
return k, t
def knight (self):
for x, row in enumerate(self.board):
for y, cell in enumerate(row):
if cell == "K":
return x, y
return None, None
def moves (self, p):
x, y = p[0], p[1]
targets = [
[x - 2, y - 1],
[x - 2, y + 1],
[x - 1, y + 2],
[x + 1, y + 2],
[x + 2, y - 1],
[x + 2, y + 1],
[x - 1, y - 2],
[x + 1, y - 2],
]
m = []
for target in targets:
if self.valid(target):
m.append(target)
return m
def valid (self, p):
x, y = p[0], p[1]
if x < 0:
return False
if x >= len(self.board):
return False
if y < 0:
return False
if y >= len(self.board[0]):
return False
c = self.board[x][y]
if c == "*":
return False
if c == "K":
return False
if c == ".":
return True
return False
def __str__ (self):
s = ""
for row in self.board:
s += "".join(row)
s += "\n"
return s
| mit | -7,622,723,604,843,863,000 | 21.863636 | 81 | 0.394135 | false | 3.451115 | false | false | false |
fupadev/FuME | fume/threads/DownloadProcessor.py | 1 | 5680 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# --------------------------------------------------------------------------
# FuME FuPa Match Explorer Copyright (c) 2017 Andreas Feldl <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The full license of the GNU General Public License is in the file LICENCE,
# distributed with this software; if not, see http://www.gnu.org/licenses/.
# --------------------------------------------------------------------------
import sqlite3
import lxml.html
import requests
from PyQt5 import QtCore
class DownloadProcessor(QtCore.QThread):
loggerSignal = QtCore.pyqtSignal(str)
statusBarSignal = QtCore.pyqtSignal(str)
def __init__(self, options):
super(DownloadProcessor, self).__init__(options['parent'])
self.region = options['region']
self.date_from = options['date-from']
self.date_to = options['date-to']
self.dbPath = options['database-path']
# def __del__(self):
# self.wait()
def download(self, date):
uAStr = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'
headers = {'User-Agent': uAStr}
url = 'https://www.fupa.net/index.php?page=kalender&site_linkurl=%s&date=%s' % (self.region, date)
r = requests.get(url, headers=headers)
doc = lxml.html.fromstring(r.content)
path = '/html/body//table[@class]//tr/td/a[not(contains(@class, "spielbericht_icon"))]//text() | ' \
'/html/body//table[@class]//tr/td//img/@src | ' \
'/html/body//table[@class]//th//text() | ' \
'/html/body//table[@class]//th/a/@href | ' \
'/html/body//table[@class]//tr/td[@style]/a/@href'
raw = doc.xpath(path)
# replacing '-Live-' with '-:-'
raw = [i.replace('https://www.fupa.net/fupa/images/buttons/tipp_live.jpg', '-:-') for i in raw]
# From
# ['/liga/bezirksliga-west-31261.html', 'Bezirksliga West', '19:15 Uhr', 'TSV Abensberg', '-:-',
# '/spielberichte/tsv-abensberg-spvgg-mariaposching-3679861.html', 'SpVgg Mariaposching',
# To
# [['Bezirksliga West', '19:15 Uhr', 'TSV Abensberg', '-:-', '3679861', 'SpVgg Mariaposching'],
matches = []
for i, d in enumerate(raw):
if 'Relegation' in d:
league = 'Relegation'
elif '/liga/' in d:
league = raw[i + 1]
elif 'Test' in d:
league = raw[i]
if 'Uhr' in d:
# print(i)
current = [league]
for i in raw[i:i + 5]:
if '/spielberichte/' in i:
i = i.split('.')[0].split('-')[-1]
if '/spielberichte/' in i: # Fehler in Fupa: URL = '/spielberichte/.html'
i = ''
current.append(i)
matches.append(current)
# rearrange
# ['3679861', 'Bezirksliga West', '19:15 Uhr', 'TSV Abensberg', 'SpVgg Mariaposching', '-:-']
tmp = []
for spiel in matches:
order = [4, 0, 1, 2, 5, 3]
spiel = [spiel[i] for i in order]
spiel[2] = date + ' ' + spiel[2][0:5]
tmp.append(spiel)
data = tmp
connection = sqlite3.connect(self.dbPath)
cursor = connection.cursor()
for p in data:
format_str = """INSERT OR IGNORE INTO calendar(match_id, league, match_date, home, guest, result, region)
VALUES ("{match_id}", "{league}", "{match_date}", "{home}", "{guest}", "{result}", "{region}");"""
sql_command = format_str.format(match_id=p[0], league=p[1], match_date=p[2],
home=p[3], guest=p[4], result=p[5], region=self.region)
try:
cursor.execute(sql_command)
except:
self.loggerSignal.emit('Folgendes Spiel wurde nicht hinzugefügt: %s' % p)
update_str = """UPDATE calendar
SET match_date="{match_date}", result="{result}", league="{league}" WHERE match_id = "{match_id}";"""
sql_command = update_str.format(match_id=p[0], match_date=p[2], league=p[1], result=p[5])
try:
cursor.execute(sql_command)
except:
self.loggerSignal.emit('Folgendes Spiel wurde nicht hinzugefügt: %s' % p)
connection.commit()
connection.close()
return len(data)
def run(self):
self.statusBarSignal.emit("Download")
date_from = self.date_from
date_to = self.date_to.addDays(1)
counter = 0
while date_from != date_to:
try:
counter += self.download(date_from.toString("yyyy-MM-dd"))
except Exception as e:
self.loggerSignal.emit('Fehler beim importieren: %s' % e)
return
date_from = date_from.addDays(1)
self.statusBarSignal.emit("Download: #%s Spiele" % counter)
self.loggerSignal.emit('%s Spiele erfolgreich hinzugefügt' % counter)
self.statusBarSignal.emit("Bereit")
| gpl-3.0 | -4,301,365,850,361,170,000 | 38.423611 | 121 | 0.543773 | false | 3.457369 | false | false | false |
ibc/MediaSoup | worker/deps/catch/projects/TestScripts/testRandomOrder.py | 1 | 2135 | #!/usr/bin/env python3
"""
This test script verifies that the random ordering of tests inside
Catch2 is invariant in regards to subsetting. This is done by running
the binary 3 times, once with all tests selected, and twice with smaller
subsets of tests selected, and verifying that the selected tests are in
the same relative order.
"""
import subprocess
import sys
import random
def list_tests(self_test_exe, tags, rng_seed):
cmd = [self_test_exe, '--list-test-names-only', '--order', 'rand',
'--rng-seed', str(rng_seed)]
tags_arg = ','.join('[{}]'.format(t) for t in tags)
if tags_arg:
cmd.append(tags_arg + '~[.]')
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if stderr:
raise RuntimeError("Unexpected error output:\n" + process.stderr)
result = stdout.split(b'\n')
result = [s for s in result if s]
if len(result) < 2:
raise RuntimeError("Unexpectedly few tests listed (got {})".format(
len(result)))
return result
def check_is_sublist_of(shorter, longer):
assert len(shorter) < len(longer)
assert len(set(longer)) == len(longer)
indexes_in_longer = {s: i for i, s in enumerate(longer)}
for s1, s2 in zip(shorter, shorter[1:]):
assert indexes_in_longer[s1] < indexes_in_longer[s2], (
'{} comes before {} in longer list.\n'
'Longer: {}\nShorter: {}'.format(s2, s1, longer, shorter))
def main():
self_test_exe, = sys.argv[1:]
# We want a random seed for the test, but want to avoid 0,
# because it has special meaning
seed = random.randint(1, 2 ** 32 - 1)
list_one_tag = list_tests(self_test_exe, ['generators'], seed)
list_two_tags = list_tests(self_test_exe, ['generators', 'matchers'], seed)
list_all = list_tests(self_test_exe, [], seed)
# First, verify that restricting to a subset yields the same order
check_is_sublist_of(list_two_tags, list_all)
check_is_sublist_of(list_one_tag, list_two_tags)
if __name__ == '__main__':
sys.exit(main())
| isc | 6,595,939,224,350,894,000 | 35.186441 | 79 | 0.640749 | false | 3.405104 | true | false | false |
arpitprogressive/arpittest | intergration_test/banner.py | 1 | 18254 | # -*- coding: utf-8 -*-
"""
banner
Description goes here...
:copyright: (c) 2014 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
import unittest
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, \
NoAlertPresentException
from base import Selenium2OnSauce
class Banner(Selenium2OnSauce):
def test_advanced_skills(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/advanced-skills/")
self.assertTrue(self.is_element_present(By.ID, "wfmis"))
def test_advanced_skills_epp(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/software-products/advanced-skills/")
def test_advanced_skills_erd(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/erd/advanced-skills/")
def test_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/")
def test_central_overnment(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/central-government/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[4]/div"))
def test_company_research(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/company-research/")
def test_company_training_provider(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/company-training-programs/")
def test_courseware(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
def test_developing_tomorrow(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/")
def test_download(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/download/")
def test_epp(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/erd/foundation-skills/epp/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
def test_erd(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/erd/")
def test_event(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/event-workforce-enablement/")
def test_executive_summary(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/read-only/executive-summary/")
def test_foundation_advance_skills_devlopment(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/")
def test_foundation_convocation_banner(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/")
self.assertTrue(self.is_element_present(By.XPATH, "(//a[contains(text(),'Know More')])[3]"))
driver.get("http://pursuite.openlabs.us/about-us/ssc-nasscom/vision-mission/")
def test_foundation_skills_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/foundation-skills/")
def test_foundation_skills_ed(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/erd/foundation-skills/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
def test_foundation_skills_epp(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/software-products/foundation-skills/")
def test_full_course(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/read-only/full-course/")
def test_gbfs_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/foundation-skills/gbfs/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "span.filetitle"))
def test_government(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
def test_government_research(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
def test_government_training_program(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/government-training-programs/")
def test_healp_you_choose(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/")
self.assertTrue(self.is_element_present(By.LINK_TEXT, "Know More"))
def test_ict_academy_tamilnadu(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/ict-academy-tamilnadu/")
def test_il_fs(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resougvrces/private-sector-training-programs/ilfs/")
def test_implementation_cycle_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/foundation-skills/gbfs/implementation-cycle/")
def test_interactive_tools(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/interactive-tools/")
def test_it_initiative(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
def test_it_ites(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/it-ites-initiativesprograms/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[7]/div"))
def test_listining_of_programs(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/event-workforce-enablement/listing-programs/")
def test_nasscom_research(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/nasscom-research/")
def test_niit(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/niit/")
def test_obf_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/foundation-skills/gbfs/outcome-based-framework-gbfs/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "span.filetitle"))
def test_other_bodies_government(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/government-training-programs/other-bodies/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
def test_other_bodies(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/other-bodies/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[4]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[5]/div"))
def test_other_publication(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/other-publication/")
def test_policy_development(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/policy-development/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[4]/div"))
def test_private_sector_training_programs(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/")
def test_program_registration(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/event-workforce-enablement/program-registration/")
def test_promotion_marketing(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/promotion-marketing/")
def test_read_only(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/read-only/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
def test_research(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
def test_skills_academy(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/skills-academy/")
def test_software_products(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/software-products/")
def test_ssc_training_programs(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/")
def test_state_government(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/state-government/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[4]/div"))
def test_talent_sprint(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/talent-sprint/")
def test_training_materials(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/training-materials/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
def test_training_that_helps_you(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/")
self.assertTrue(self.is_element_present(By.XPATH, "(//a[contains(text(),'Know More')])[2]"))
def test_training_tools(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/training-tools/")
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException, e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 7,783,938,493,523,269,000 | 60.461279 | 238 | 0.7165 | false | 3.1713 | true | false | false |
alfa-addon/addon | plugin.video.alfa/channels/bloghorror.py | 1 | 5917 | # -*- coding: utf-8 -*-
# -*- Channel BlogHorror -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import os
import re
from bs4 import BeautifulSoup
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger, subtitletools
from channelselector import get_thumb
host = 'http://bloghorror.com/'
fanart = 'http://bloghorror.com/wp-content/uploads/2015/04/bloghorror-2017-x.jpg'
def create_soup(url, referer=None, unescape=False):
logger.info()
if referer:
data = httptools.downloadpage(url, headers={'Referer': referer}).data
else:
data = httptools.downloadpage(url).data
if unescape:
data = scrapertools.unescape(data)
soup = BeautifulSoup(data, "html5lib", from_encoding="utf-8")
return soup
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(Item(channel=item.channel, fanart=fanart, title="Todas", action="list_all",
url=host+'/category/terror', thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, fanart=fanart, title="Asiaticas", action="list_all",
url=host+'/category/asiatico', thumbnail=get_thumb('asiaticas', auto=True)))
itemlist.append(Item(channel=item.channel, fanart=fanart, title = 'Buscar', action="search", url=host + '?s=', pages=3,
thumbnail=get_thumb('search', auto=True)))
return itemlist
def list_all(item):
logger.info()
itemlist = list()
soup = create_soup(item.url)
matches = soup.find(id="primary").find_all("article")
for elem in matches:
cat = elem.find("a", class_="covernews-categories")["alt"]
if cat in ["View all posts in Las Mejores Peliculas de Terror", "View all posts in Editoriales"]:
continue
title_data = elem.find("h3", class_="article-title").text.strip()
if "(" in title_data:
title = title_data.replace(")", "").split(" (")
elif "[" in title_data:
title = title_data.replace("]", "").split(" [")
url = elem.find("h3", class_="article-title").a["href"]
thumb = elem.find("div", class_="data-bg-hover")["data-background"]
try:
year = title[1]
except:
year = "-"
if "serie" in url:
continue
itemlist.append(Item(channel=item.channel, title=title[0], url=url, contentTitle=title[0], thumbnail=thumb,
action="findvideos", infoLabels={"year": year}))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination
if itemlist:
try:
next_page = soup.find("div", class_="navigation").find("a", class_="next")["href"]
if next_page != '':
itemlist.append(Item(channel=item.channel, fanart=fanart, action="list_all", title='Siguiente >>>',
url=next_page))
except:
pass
return itemlist
def findvideos(item):
logger.info()
itemlist = list()
soup = create_soup(item.url).find("div", class_="entry-content-wrap")
quality = scrapertools.find_single_match(soup.text, r"Calidad: ([^\n]+)\n").split("+")
urls_list = soup.find_all("a", {"data-wpel-link": True, "href": re.compile("magnet|torrent")})
try:
sub_url = soup.find("a", {"data-wpel-link": True, "href": re.compile("subdivx")})["href"]
except:
sub_url = ""
qlty_cnt = 0
for url in urls_list:
url = url["href"]
if not sub_url:
lang = 'VO'
else:
lang = 'VOSE'
try:
qlty = quality[qlty_cnt]
qlty_cnt += 1
except:
qlty = "SD"
itemlist.append(Item(channel=item.channel, title="[%s][%s][%s]", url=url, action="play", quality=qlty,
language=lang, subtitle=sub_url, infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % (i.server, i.language, i.quality))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist
def play(item):
logger.info()
if item.subtitle:
sub = subtitletools.get_from_subdivx(item.subtitle)
return [item.clone(subtitle=sub)]
else:
return [item]
def search(item, texto):
logger.info()
try:
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return list_all(item)
else:
return []
    # Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas', 'terror', 'torrent']:
item.url = host
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
| gpl-3.0 | 5,249,670,684,535,636,000 | 28.575 | 123 | 0.572612 | false | 3.70382 | false | false | false |
NicholasHoCode/Galaxy | assets/code/GF.py | 1 | 2040 | from PIL import Image
import numpy as np
import math
from scipy import signal
def boxfilter(n):
assert (n%2 != 0),"Dimension must be odd"
a = np.empty((n, n))
a.fill(1/(n*n))
return a
def gauss1d(sigma):
    # Kernel length: at least 6*sigma, rounded up to the next odd integer so the
    # kernel has a single centre sample.
    arr_length = int(math.ceil(6 * sigma))
    if arr_length % 2 == 0:
        arr_length = arr_length + 1
    half = (arr_length + 1) // 2
    lst = list(range(half))
    neg_lst = [-x for x in lst]
    neg_lst.remove(0)
    neg_lst.reverse()
    a_val = neg_lst + lst
    a_val = [math.exp(-(x * x) / (2.0 * sigma * sigma)) for x in a_val]
    sum_aval = sum(a_val)
    return np.asarray([x / sum_aval for x in a_val])
def gauss2d(sigma):
f = gauss1d(sigma)
return signal.convolve2d(f[np.newaxis], np.transpose(f[np.newaxis]))
def gaussconvolve2d(array,sigma):
assert (array.ndim == 2),"Array must be 2D"
filter = gauss2d(sigma)
result = signal.convolve2d(array, filter, 'same')
return result
# signal.convolve2d and signal.correlate2d produce different results when the filter is not
# symmetric, because convolution flips the kernel before sliding it while correlation does not.
# Convolution's associativity is what allows several filters to be pre-convolved into a single
# filter that is then applied once.
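# Illustrative sketch (added example, not part of the original assignment): with an
# asymmetric kernel the two operations disagree, because convolution flips the kernel first.
asym_kernel = np.array([[1, 0], [0, 0]])
tiny_image = np.arange(9).reshape(3, 3)
conv_out = signal.convolve2d(tiny_image, asym_kernel, 'same')
corr_out = signal.correlate2d(tiny_image, asym_kernel, 'same')
# conv_out and corr_out differ here; with a symmetric kernel they would be identical.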
im = Image.open('bb.jpg')
im.show()
im = im.convert('L')
im_arr = np.asarray(im)
nim = gaussconvolve2d(im_arr, 3)
fim = Image.fromarray(nim)
if fim.mode != 'L':
fim = fim.convert('L')
fim.save('bb_filtered.jpg')
# Since the Gaussian is separable, a 2D Gaussian filter can be obtained by multiplying two 1D
# Gaussian filters. A more efficient implementation convolves each row with a 1D filter and then
# each column with a 1D filter, which costs O(n) per pixel instead of O(n^2) for an n x n kernel.
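# Separable-filtering sketch (added example; reuses the sigma=3 blur from above): filter the
# rows with the 1D kernel, then the columns, instead of one 2D convolution.
g1 = gauss1d(3)
sep = signal.convolve2d(im_arr, g1[np.newaxis], 'same')
sep = signal.convolve2d(sep, np.transpose(g1[np.newaxis]), 'same')
# Up to border handling, `sep` matches gaussconvolve2d(im_arr, 3) computed earlier.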
| mit | -6,833,917,436,678,237,000 | 34.172414 | 208 | 0.659314 | false | 3.044776 | false | false | false |
Azure/azure-sdk-for-python | common/smoketest/key_vault_keys_async.py | 1 | 1475 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import uuid
from azure.keyvault.keys.aio import KeyClient
from key_vault_base_async import KeyVaultBaseAsync
class KeyVaultKeys(KeyVaultBaseAsync):
def __init__(self):
args = self.get_client_args()
self.key_client = KeyClient(**args)
self.key_name = "key-name-" + uuid.uuid1().hex
async def create_rsa_key(self):
print("Creating an RSA key...")
await self.key_client.create_rsa_key(name=self.key_name, size=2048)
print("\tdone")
async def get_key(self):
print("Getting a key...")
key = await self.key_client.get_key(name=self.key_name)
print("\tdone, key: {}.".format(key.name))
async def delete_key(self):
print("Deleting a key...")
deleted_key = await self.key_client.delete_key(name=self.key_name)
print("\tdone: " + deleted_key.name)
async def run(self):
print("")
print("------------------------")
print("Key Vault - Keys\nIdentity - Credential")
print("------------------------")
print("1) Create a key")
print("2) Get that key")
print("3) Delete that key (Clean up the resource)")
print("")
try:
await self.create_rsa_key()
await self.get_key()
finally:
await self.delete_key() | mit | 5,781,894,766,305,510,000 | 31.086957 | 75 | 0.541695 | false | 3.891821 | false | false | false |
pytroll/pygac | pygac/tests/test_calibrate_pod.py | 1 | 5529 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, 2015 Martin Raspaud
# Author(s):
# Martin Raspaud <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test function for the POD calibration.
"""
import unittest
try:
import mock
except ImportError:
from unittest import mock
import numpy as np
from pygac.calibration import Calibrator, calibrate_solar, calibrate_thermal
class TestGenericCalibration(unittest.TestCase):
def test_calibration_vis(self):
counts = np.array([[0, 0, 0, 0, 0,
512, 512, 512, 512, 512,
1023, 1023, 1023, 1023, 1023],
[41, 41, 41, 41, 41,
150, 150, 150, 150, 150,
700, 700, 700, 700, 700]])
year = 1997
jday = 196
spacecraft_id = "noaa14"
cal = Calibrator(spacecraft_id)
corr = 1
channel = 0
ref1 = calibrate_solar(counts[:, channel::5], channel, year, jday, cal, corr)
channel = 1
ref2 = calibrate_solar(counts[:, channel::5], channel, year, jday, cal, corr)
channel = 2
data = np.ma.array(counts[:, channel::5], mask=True)
ref3 = calibrate_solar(data, channel, year, jday, cal, corr)
expected = (np.array([[np.nan, 60.891074, 126.953364],
[0., 14.091565, 85.195791]]),
np.array([[np.nan, 72.98262, 152.16334],
[0., 16.889821, 102.113687]]),
np.array([[-32001., -32001., -32001.],
[-32001., -32001., -32001.]]))
np.testing.assert_allclose(ref1, expected[0])
np.testing.assert_allclose(ref2, expected[1])
np.testing.assert_allclose(ref3.filled(-32001), expected[2])
def test_calibration_ir(self):
counts = np.array([[0, 0, 612, 0, 0,
512, 512, 487, 512, 512,
923, 923, 687, 923, 923],
[41, 41, 634, 41, 41,
150, 150, 461, 150, 150,
700, 700, 670, 700, 700],
[241, 241, 656, 241, 241,
350, 350, 490, 350, 350,
600, 600, 475, 600, 600]])
prt_counts = np.array([0, 230, 230])
ict_counts = np.array([[745.3, 397.9, 377.8],
[744.8, 398.1, 378.4],
[745.7, 398., 378.3]])
space_counts = np.array([[987.3, 992.5, 989.4],
[986.9, 992.8, 989.6],
[986.3, 992.3, 988.9]])
spacecraft_id = "noaa14"
cal = Calibrator(spacecraft_id)
ch3 = calibrate_thermal(counts[:, 2::5],
prt_counts,
ict_counts[:, 0],
space_counts[:, 0],
line_numbers=np.array([1, 2, 3]),
channel=3,
cal=cal)
expected_ch3 = np.array([[298.28466, 305.167571, 293.16182],
[296.878502, 306.414234, 294.410224],
[295.396779, 305.020259, 305.749526]])
np.testing.assert_allclose(expected_ch3, ch3)
ch4 = calibrate_thermal(counts[:, 3::5],
prt_counts,
ict_counts[:, 1],
space_counts[:, 1],
line_numbers=np.array([1, 2, 3]),
channel=4,
cal=cal)
expected_ch4 = np.array([[325.828062, 275.414804, 196.214709],
[322.359517, 312.785057, 249.380649],
[304.326806, 293.490822, 264.148021]])
np.testing.assert_allclose(expected_ch4, ch4)
ch5 = calibrate_thermal(counts[:, 4::5],
prt_counts,
ict_counts[:, 2],
space_counts[:, 2],
line_numbers=np.array([1, 2, 3]),
channel=5,
cal=cal)
expected_ch5 = np.array([[326.460316, 272.146547, 187.434456],
[322.717606, 312.388155, 244.241633],
[303.267012, 291.590832, 260.05426]])
np.testing.assert_allclose(expected_ch5, ch5)
def suite():
"""The suite for test_slerp
"""
loader = unittest.TestLoader()
mysuite = unittest.TestSuite()
mysuite.addTest(loader.loadTestsFromTestCase(TestGenericCalibration))
return mysuite
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 4,917,036,265,671,069,000 | 35.375 | 85 | 0.477302 | false | 3.748475 | true | false | false |
mind1master/aiohttp | tests/test_client_session.py | 1 | 14280 | import asyncio
import contextlib
import gc
import http.cookies
import re
import types
from unittest import mock
import pytest
from multidict import CIMultiDict, MultiDict
import aiohttp
from aiohttp import web
from aiohttp.client import ClientSession
from aiohttp.connector import BaseConnector, TCPConnector
@pytest.fixture
def connector(loop):
conn = BaseConnector(loop=loop)
transp = mock.Mock()
conn._conns['a'] = [(transp, 'proto', 123)]
return conn
@pytest.yield_fixture
def create_session(loop):
session = None
def maker(*args, **kwargs):
nonlocal session
session = ClientSession(*args, loop=loop, **kwargs)
return session
yield maker
if session is not None:
session.close()
@pytest.fixture
def session(create_session):
return create_session()
@pytest.fixture
def params():
return dict(
headers={"Authorization": "Basic ..."},
max_redirects=2,
encoding="latin1",
version=aiohttp.HttpVersion10,
compress="deflate",
chunked=True,
expect100=True,
read_until_eof=False)
def test_init_headers_simple_dict(create_session):
session = create_session(headers={"h1": "header1",
"h2": "header2"})
assert (sorted(session._default_headers.items()) ==
([("H1", "header1"), ("H2", "header2")]))
def test_init_headers_list_of_tuples(create_session):
session = create_session(headers=[("h1", "header1"),
("h2", "header2"),
("h3", "header3")])
assert (session._default_headers ==
CIMultiDict([("h1", "header1"),
("h2", "header2"),
("h3", "header3")]))
def test_init_headers_MultiDict(create_session):
session = create_session(headers=MultiDict([("h1", "header1"),
("h2", "header2"),
("h3", "header3")]))
assert (session._default_headers ==
CIMultiDict([("H1", "header1"),
("H2", "header2"),
("H3", "header3")]))
def test_init_headers_list_of_tuples_with_duplicates(create_session):
session = create_session(headers=[("h1", "header11"),
("h2", "header21"),
("h1", "header12")])
assert (session._default_headers ==
CIMultiDict([("H1", "header11"),
("H2", "header21"),
("H1", "header12")]))
def test_init_cookies_with_simple_dict(create_session):
session = create_session(cookies={"c1": "cookie1",
"c2": "cookie2"})
assert set(session.cookies) == {'c1', 'c2'}
assert session.cookies['c1'].value == 'cookie1'
assert session.cookies['c2'].value == 'cookie2'
def test_init_cookies_with_list_of_tuples(create_session):
session = create_session(cookies=[("c1", "cookie1"),
("c2", "cookie2")])
assert set(session.cookies) == {'c1', 'c2'}
assert session.cookies['c1'].value == 'cookie1'
assert session.cookies['c2'].value == 'cookie2'
def test_merge_headers(create_session):
# Check incoming simple dict
session = create_session(headers={"h1": "header1",
"h2": "header2"})
headers = session._prepare_headers({"h1": "h1"})
assert isinstance(headers, CIMultiDict)
assert headers == CIMultiDict([("h2", "header2"),
("h1", "h1")])
def test_merge_headers_with_multi_dict(create_session):
session = create_session(headers={"h1": "header1",
"h2": "header2"})
headers = session._prepare_headers(MultiDict([("h1", "h1")]))
assert isinstance(headers, CIMultiDict)
assert headers == CIMultiDict([("h2", "header2"),
("h1", "h1")])
def test_merge_headers_with_list_of_tuples(create_session):
session = create_session(headers={"h1": "header1",
"h2": "header2"})
headers = session._prepare_headers([("h1", "h1")])
assert isinstance(headers, CIMultiDict)
assert headers == CIMultiDict([("h2", "header2"),
("h1", "h1")])
def test_merge_headers_with_list_of_tuples_duplicated_names(create_session):
session = create_session(headers={"h1": "header1",
"h2": "header2"})
headers = session._prepare_headers([("h1", "v1"),
("h1", "v2")])
assert isinstance(headers, CIMultiDict)
assert headers == CIMultiDict([("H2", "header2"),
("H1", "v1"),
("H1", "v2")])
def test_http_GET(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.get("http://test.example.com",
params={"x": 1},
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("GET", "http://test.example.com",),
dict(
params={"x": 1},
allow_redirects=True,
**params)]
def test_http_OPTIONS(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.options("http://opt.example.com",
params={"x": 2},
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("OPTIONS", "http://opt.example.com",),
dict(
params={"x": 2},
allow_redirects=True,
**params)]
def test_http_HEAD(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.head("http://head.example.com",
params={"x": 2},
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("HEAD", "http://head.example.com",),
dict(
params={"x": 2},
allow_redirects=False,
**params)]
def test_http_POST(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.post("http://post.example.com",
params={"x": 2},
data="Some_data",
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("POST", "http://post.example.com",),
dict(
params={"x": 2},
data="Some_data",
**params)]
def test_http_PUT(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.put("http://put.example.com",
params={"x": 2},
data="Some_data",
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("PUT", "http://put.example.com",),
dict(
params={"x": 2},
data="Some_data",
**params)]
def test_http_PATCH(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.patch("http://patch.example.com",
params={"x": 2},
data="Some_data",
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("PATCH", "http://patch.example.com",),
dict(
params={"x": 2},
data="Some_data",
**params)]
def test_http_DELETE(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.delete("http://delete.example.com",
params={"x": 2},
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("DELETE",
"http://delete.example.com",),
dict(
params={"x": 2},
**params)]
def test_close(create_session, connector):
session = create_session(connector=connector)
session.close()
assert session.connector is None
assert connector.closed
def test_closed(session):
assert not session.closed
session.close()
assert session.closed
def test_connector(create_session, loop):
connector = TCPConnector(loop=loop)
session = create_session(connector=connector)
assert session.connector is connector
def test_connector_loop(loop):
with contextlib.ExitStack() as stack:
another_loop = asyncio.new_event_loop()
stack.enter_context(contextlib.closing(another_loop))
connector = TCPConnector(loop=another_loop)
stack.enter_context(contextlib.closing(connector))
with pytest.raises(ValueError) as ctx:
ClientSession(connector=connector, loop=loop)
assert re.match("loop argument must agree with connector",
str(ctx.value))
def test_cookies_are_readonly(session):
with pytest.raises(AttributeError):
session.cookies = 123
def test_detach(session):
conn = session.connector
try:
assert not conn.closed
session.detach()
assert session.connector is None
assert session.closed
assert not conn.closed
finally:
conn.close()
@pytest.mark.run_loop
def test_request_closed_session(session):
session.close()
with pytest.raises(RuntimeError):
yield from session.request('get', '/')
def test_close_flag_for_closed_connector(session):
conn = session.connector
assert not session.closed
conn.close()
assert session.closed
def test_double_close(connector, create_session):
session = create_session(connector=connector)
session.close()
assert session.connector is None
session.close()
assert session.closed
assert connector.closed
def test_del(connector, loop, warning):
# N.B. don't use session fixture, it stores extra reference internally
session = ClientSession(connector=connector, loop=loop)
loop.set_exception_handler(lambda loop, ctx: None)
with warning(ResourceWarning):
del session
gc.collect()
def test_context_manager(connector, loop):
with ClientSession(loop=loop, connector=connector) as session:
pass
assert session.closed
def test_borrow_connector_loop(connector, create_session, loop):
session = ClientSession(connector=connector, loop=None)
try:
assert session._loop, loop
finally:
session.close()
@pytest.mark.run_loop
def test_reraise_os_error(create_session):
err = OSError(1, "permission error")
req = mock.Mock()
req_factory = mock.Mock(return_value=req)
req.send = mock.Mock(side_effect=err)
session = create_session(request_class=req_factory)
@asyncio.coroutine
def create_connection(req):
# return self.transport, self.protocol
return mock.Mock(), mock.Mock()
session._connector._create_connection = create_connection
with pytest.raises(aiohttp.ClientOSError) as ctx:
yield from session.request('get', 'http://example.com')
e = ctx.value
assert e.errno == err.errno
assert e.strerror == err.strerror
@pytest.mark.run_loop
def test_request_ctx_manager_props(loop):
yield from asyncio.sleep(0, loop=loop) # to make it a task
with aiohttp.ClientSession(loop=loop) as client:
ctx_mgr = client.get('http://example.com')
next(ctx_mgr)
assert isinstance(ctx_mgr.gi_frame, types.FrameType)
assert not ctx_mgr.gi_running
assert isinstance(ctx_mgr.gi_code, types.CodeType)
@pytest.mark.run_loop
def test_cookie_jar_usage(create_app_and_client):
req_url = None
jar = mock.Mock()
jar.filter_cookies.return_value = None
@asyncio.coroutine
def handler(request):
nonlocal req_url
req_url = "http://%s/" % request.host
resp = web.Response()
resp.set_cookie("response", "resp_value")
return resp
app, client = yield from create_app_and_client(
client_params={"cookies": {"request": "req_value"},
"cookie_jar": jar}
)
app.router.add_route('GET', '/', handler)
# Updating the cookie jar with initial user defined cookies
jar.update_cookies.assert_called_with({"request": "req_value"})
jar.update_cookies.reset_mock()
yield from client.get("/")
# Filtering the cookie jar before sending the request,
# getting the request URL as only parameter
jar.filter_cookies.assert_called_with(req_url)
# Updating the cookie jar with the response cookies
assert jar.update_cookies.called
resp_cookies = jar.update_cookies.call_args[0][0]
assert isinstance(resp_cookies, http.cookies.SimpleCookie)
assert "response" in resp_cookies
assert resp_cookies["response"].value == "resp_value"
def test_session_default_version(loop):
session = aiohttp.ClientSession(loop=loop)
assert session.version == aiohttp.HttpVersion11
| apache-2.0 | 8,333,107,715,438,681,000 | 33 | 78 | 0.55112 | false | 4.27673 | true | false | false |
dnanexus/rseqc | rseqc/scripts/read_distribution.py | 1 | 11931 | #!/usr/bin/env python
'''-------------------------------------------------------------------------------------------------
Check reads distribution over exon, intron, UTR, intergenic ... etc
-------------------------------------------------------------------------------------------------'''
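#Usage sketch (illustrative only; the BAM and BED file names below are placeholders):
#  python read_distribution.py -i accepted_hits.bam -r hg19_RefSeq.bed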
#import built-in modules
import os,sys
if sys.version_info[0] != 2 or sys.version_info[1] != 7:
print >>sys.stderr, "\nYou are using python" + str(sys.version_info[0]) + '.' + str(sys.version_info[1]) + " RSeQC needs python2.7!\n"
sys.exit()
import re
import string
from optparse import OptionParser
import warnings
import string
import collections
import math
import sets
#import third-party modules
from bx.bitset import *
from bx.bitset_builders import *
from bx.intervals import *
from bx.binned_array import BinnedArray
from bx_extras.fpconst import isNaN
from bx.bitset_utils import *
#import my own modules
from qcmodule import BED
from qcmodule import SAM
from qcmodule import bam_cigar
__author__ = "Liguo Wang"
__copyright__ = "Copyright 2012. All rights reserved."
__credits__ = []
__license__ = "GPL"
__version__="2.3.3"
__maintainer__ = "Liguo Wang"
__email__ = "[email protected]"
__status__ = "Production"
def cal_size(list):
    '''calculate the total size of a bed list'''
size=0
for l in list:
size += l[2] - l[1]
return size
def foundone(chrom,ranges, st, end):
found = 0
if chrom in ranges:
found = len(ranges[chrom].find(st,end))
return found
def build_bitsets(list):
'''build intevalTree from list'''
ranges={}
for l in list:
chrom =l[0].upper()
st = int(l[1])
end = int(l[2])
if chrom not in ranges:
ranges[chrom] = Intersecter()
ranges[chrom].add_interval( Interval( st, end ) )
return ranges
def process_gene_model(gene_model):
print >>sys.stderr, "processing " + gene_model + ' ...',
obj = BED.ParseBED(gene_model)
utr_3 = obj.getUTR(utr=3)
utr_5 = obj.getUTR(utr=5)
cds_exon = obj.getCDSExon()
intron = obj.getIntron()
intron = BED.unionBed3(intron)
cds_exon=BED.unionBed3(cds_exon)
utr_5 = BED.unionBed3(utr_5)
utr_3 = BED.unionBed3(utr_3)
utr_5 = BED.subtractBed3(utr_5,cds_exon)
utr_3 = BED.subtractBed3(utr_3,cds_exon)
intron = BED.subtractBed3(intron,cds_exon)
intron = BED.subtractBed3(intron,utr_5)
intron = BED.subtractBed3(intron,utr_3)
intergenic_up_1kb = obj.getIntergenic(direction="up",size=1000)
intergenic_down_1kb = obj.getIntergenic(direction="down",size=1000)
intergenic_up_5kb = obj.getIntergenic(direction="up",size=5000)
intergenic_down_5kb = obj.getIntergenic(direction="down",size=5000)
intergenic_up_10kb = obj.getIntergenic(direction="up",size=10000)
intergenic_down_10kb = obj.getIntergenic(direction="down",size=10000)
#merge integenic region
intergenic_up_1kb=BED.unionBed3(intergenic_up_1kb)
intergenic_up_5kb=BED.unionBed3(intergenic_up_5kb)
intergenic_up_10kb=BED.unionBed3(intergenic_up_10kb)
intergenic_down_1kb=BED.unionBed3(intergenic_down_1kb)
intergenic_down_5kb=BED.unionBed3(intergenic_down_5kb)
intergenic_down_10kb=BED.unionBed3(intergenic_down_10kb)
#purify intergenic region
intergenic_up_1kb=BED.subtractBed3(intergenic_up_1kb,cds_exon)
intergenic_up_1kb=BED.subtractBed3(intergenic_up_1kb,utr_5)
intergenic_up_1kb=BED.subtractBed3(intergenic_up_1kb,utr_3)
intergenic_up_1kb=BED.subtractBed3(intergenic_up_1kb,intron)
intergenic_down_1kb=BED.subtractBed3(intergenic_down_1kb,cds_exon)
intergenic_down_1kb=BED.subtractBed3(intergenic_down_1kb,utr_5)
intergenic_down_1kb=BED.subtractBed3(intergenic_down_1kb,utr_3)
intergenic_down_1kb=BED.subtractBed3(intergenic_down_1kb,intron)
#purify intergenic region
intergenic_up_5kb=BED.subtractBed3(intergenic_up_5kb,cds_exon)
intergenic_up_5kb=BED.subtractBed3(intergenic_up_5kb,utr_5)
intergenic_up_5kb=BED.subtractBed3(intergenic_up_5kb,utr_3)
intergenic_up_5kb=BED.subtractBed3(intergenic_up_5kb,intron)
intergenic_down_5kb=BED.subtractBed3(intergenic_down_5kb,cds_exon)
intergenic_down_5kb=BED.subtractBed3(intergenic_down_5kb,utr_5)
intergenic_down_5kb=BED.subtractBed3(intergenic_down_5kb,utr_3)
intergenic_down_5kb=BED.subtractBed3(intergenic_down_5kb,intron)
#purify intergenic region
intergenic_up_10kb=BED.subtractBed3(intergenic_up_10kb,cds_exon)
intergenic_up_10kb=BED.subtractBed3(intergenic_up_10kb,utr_5)
intergenic_up_10kb=BED.subtractBed3(intergenic_up_10kb,utr_3)
intergenic_up_10kb=BED.subtractBed3(intergenic_up_10kb,intron)
intergenic_down_10kb=BED.subtractBed3(intergenic_down_10kb,cds_exon)
intergenic_down_10kb=BED.subtractBed3(intergenic_down_10kb,utr_5)
intergenic_down_10kb=BED.subtractBed3(intergenic_down_10kb,utr_3)
intergenic_down_10kb=BED.subtractBed3(intergenic_down_10kb,intron)
#build intervalTree
cds_exon_ranges = build_bitsets(cds_exon)
utr_5_ranges = build_bitsets(utr_5)
utr_3_ranges = build_bitsets(utr_3)
intron_ranges = build_bitsets(intron)
interg_ranges_up_1kb_ranges = build_bitsets(intergenic_up_1kb)
interg_ranges_up_5kb_ranges = build_bitsets(intergenic_up_5kb)
interg_ranges_up_10kb_ranges = build_bitsets(intergenic_up_10kb)
interg_ranges_down_1kb_ranges = build_bitsets(intergenic_down_1kb)
interg_ranges_down_5kb_ranges = build_bitsets(intergenic_down_5kb)
interg_ranges_down_10kb_ranges = build_bitsets(intergenic_down_10kb)
exon_size = cal_size(cds_exon)
intron_size = cal_size(intron)
utr3_size = cal_size(utr_3)
utr5_size = cal_size(utr_5)
int_up1k_size = cal_size(intergenic_up_1kb)
int_up5k_size = cal_size(intergenic_up_5kb)
int_up10k_size = cal_size(intergenic_up_10kb)
int_down1k_size = cal_size(intergenic_down_1kb)
int_down5k_size = cal_size(intergenic_down_5kb)
int_down10k_size = cal_size(intergenic_down_10kb)
print >>sys.stderr, "Done"
return (cds_exon_ranges,intron_ranges,utr_5_ranges,utr_3_ranges,\
interg_ranges_up_1kb_ranges,interg_ranges_up_5kb_ranges,interg_ranges_up_10kb_ranges,\
interg_ranges_down_1kb_ranges,interg_ranges_down_5kb_ranges,interg_ranges_down_10kb_ranges,\
exon_size,intron_size,utr5_size,utr3_size,\
int_up1k_size,int_up5k_size,int_up10k_size,\
int_down1k_size,int_down5k_size,int_down10k_size)
def main():
usage="%prog [options]" + '\n' + __doc__ + "\n"
parser = OptionParser(usage,version="%prog " + __version__)
parser.add_option("-i","--input-file",action="store",type="string",dest="input_file",help="Alignment file in BAM or SAM format.")
parser.add_option("-r","--refgene",action="store",type="string",dest="ref_gene_model",help="Reference gene model in bed format.")
(options,args)=parser.parse_args()
if not (options.input_file and options.ref_gene_model):
parser.print_help()
sys.exit(0)
if not os.path.exists(options.ref_gene_model):
print >>sys.stderr, '\n\n' + options.ref_gene_model + " does NOT exists" + '\n'
#parser.print_help()
sys.exit(0)
if not os.path.exists(options.input_file):
print >>sys.stderr, '\n\n' + options.input_file + " does NOT exists" + '\n'
sys.exit(0)
#build bitset
(cds_exon_r, intron_r, utr_5_r, utr_3_r,\
intergenic_up_1kb_r,intergenic_up_5kb_r,intergenic_up_10kb_r,\
intergenic_down_1kb_r,intergenic_down_5kb_r,intergenic_down_10kb_r,\
cds_exon_base,intron_base,utr_5_base,utr_3_base,\
intergenic_up1kb_base,intergenic_up5kb_base,intergenic_up10kb_base,\
intergenic_down1kb_base,intergenic_down5kb_base,intergenic_down10kb_base) = process_gene_model(options.ref_gene_model)
intron_read=0
cds_exon_read=0
utr_5_read=0
utr_3_read=0
intergenic_up1kb_read=0
intergenic_down1kb_read=0
intergenic_up5kb_read=0
intergenic_down5kb_read=0
intergenic_up10kb_read=0
intergenic_down10kb_read=0
totalReads=0
totalFrags=0
unAssignFrags=0
obj = SAM.ParseBAM(options.input_file)
R_qc_fail=0
R_duplicate=0
R_nonprimary=0
R_unmap=0
print >>sys.stderr, "processing " + options.input_file + " ...",
try:
while(1):
aligned_read = obj.samfile.next()
if aligned_read.is_qcfail: #skip QC fail read
R_qc_fail +=1
continue
if aligned_read.is_duplicate: #skip duplicate read
R_duplicate +=1
continue
if aligned_read.is_secondary: #skip non primary hit
R_nonprimary +=1
continue
if aligned_read.is_unmapped: #skip unmap read
R_unmap +=1
continue
totalReads +=1
chrom = obj.samfile.getrname(aligned_read.tid)
chrom=chrom.upper()
exons = bam_cigar.fetch_exon(chrom, aligned_read.pos, aligned_read.cigar)
totalFrags += len(exons)
for exn in exons:
#print chrom + '\t' + str(exn[1]) + '\t' + str(exn[2])
mid = int(exn[1]) + int((int(exn[2]) - int(exn[1]))/2)
if foundone(chrom,cds_exon_r,mid,mid) > 0:
cds_exon_read += 1
continue
elif foundone(chrom,utr_5_r,mid,mid) >0 and foundone(chrom,utr_3_r,mid,mid) == 0:
utr_5_read += 1
continue
elif foundone(chrom,utr_3_r,mid,mid) >0 and foundone(chrom,utr_5_r,mid,mid) == 0:
utr_3_read += 1
continue
elif foundone(chrom,utr_3_r,mid,mid) >0 and foundone(chrom,utr_5_r,mid,mid) > 0:
unAssignFrags +=1
continue
elif foundone(chrom,intron_r,mid,mid) > 0:
intron_read += 1
continue
elif foundone(chrom,intergenic_up_10kb_r,mid,mid) >0 and foundone(chrom,intergenic_down_10kb_r,mid,mid) > 0:
unAssignFrags +=1
continue
elif foundone(chrom,intergenic_up_1kb_r,mid,mid) >0:
intergenic_up1kb_read += 1
intergenic_up5kb_read += 1
intergenic_up10kb_read += 1
elif foundone(chrom,intergenic_up_5kb_r,mid,mid) >0:
intergenic_up5kb_read += 1
intergenic_up10kb_read += 1
elif foundone(chrom,intergenic_up_10kb_r,mid,mid) >0:
intergenic_up10kb_read += 1
elif foundone(chrom,intergenic_down_1kb_r,mid,mid) >0:
intergenic_down1kb_read += 1
intergenic_down5kb_read += 1
intergenic_down10kb_read += 1
elif foundone(chrom,intergenic_down_5kb_r,mid,mid) >0:
intergenic_down5kb_read += 1
intergenic_down10kb_read += 1
elif foundone(chrom,intergenic_down_10kb_r,mid,mid) >0:
intergenic_down10kb_read += 1
else:
unAssignFrags +=1
except StopIteration:
print >>sys.stderr, "Finished\n"
print "%-30s%d" % ("Total Reads",totalReads)
print "%-30s%d" % ("Total Tags",totalFrags)
print "%-30s%d" % ("Total Assigned Tags",totalFrags-unAssignFrags)
print "====================================================================="
print "%-20s%-20s%-20s%-20s" % ('Group','Total_bases','Tag_count','Tags/Kb')
print "%-20s%-20d%-20d%-18.2f" % ('CDS_Exons',cds_exon_base,cds_exon_read,cds_exon_read*1000.0/(cds_exon_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("5'UTR_Exons",utr_5_base,utr_5_read, utr_5_read*1000.0/(utr_5_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("3'UTR_Exons",utr_3_base,utr_3_read, utr_3_read*1000.0/(utr_3_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("Introns",intron_base,intron_read,intron_read*1000.0/(intron_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TSS_up_1kb",intergenic_up1kb_base, intergenic_up1kb_read, intergenic_up1kb_read*1000.0/(intergenic_up1kb_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TSS_up_5kb",intergenic_up5kb_base, intergenic_up5kb_read, intergenic_up5kb_read*1000.0/(intergenic_up5kb_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TSS_up_10kb",intergenic_up10kb_base, intergenic_up10kb_read, intergenic_up10kb_read*1000.0/(intergenic_up10kb_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TES_down_1kb",intergenic_down1kb_base, intergenic_down1kb_read, intergenic_down1kb_read*1000.0/(intergenic_down1kb_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TES_down_5kb",intergenic_down5kb_base, intergenic_down5kb_read, intergenic_down5kb_read*1000.0/(intergenic_down5kb_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TES_down_10kb",intergenic_down10kb_base, intergenic_down10kb_read, intergenic_down10kb_read*1000.0/(intergenic_down10kb_base+1))
print "====================================================================="
if __name__ == '__main__':
main()
| gpl-3.0 | -9,165,150,322,009,244,000 | 39.036913 | 165 | 0.693068 | false | 2.34816 | false | false | false |
nickname456/pbots | poker.py | 1 | 5255 | #-----------------------------------------------------------#
# Heads Up Omaha Challange - Starter Bot #
#===========================================================#
# #
# Last update: 22 May, 2014 #
# #
# @author Jackie <[email protected]> #
# @version 1.0 #
# @license MIT License (http://opensource.org/licenses/MIT) #
#-----------------------------------------------------------#
class Card(object):
'''
Card class
'''
def __init__(self, suit, value):
self.suit = suit
self.value = value
self.number = '23456789TJQKA'.find(value)
def __repr__(self):
return self.value+self.suit
def __cmp__(self,other):
n_cmp = cmp(self.number,other.number)
if n_cmp!=0:
return n_cmp
return cmp(self.suit,other.suit)
class Pocket(object):
'''
Pocket class
'''
def __init__(self, cards):
self.cards = cards
def __iter__(self):
return iter(self.cards)
class Table(object):
'''
Table class
'''
def __init__(self, cards):
self.cards = cards
class Hand(object):
'''
Hand class
'''
def __init__(self, cards):
self.cards = cards
self.rank = Ranker.rank_five_cards(cards)
def __gt__(self, hand):
return self.rank > hand.rank
def __ge__(self, hand):
return self.rank >= hand.rank
def __lt__(self, hand):
return self.rank < hand.rank
def __le__(self, hand):
return self.rank <= hand.rank
def __eq__(self, hand):
return self.rank == hand.rank
def __repr__(self):
return "Hand:"+str(self.cards)+" rank"+str(self.rank)
# TODO: cache the below?
def is_flush_draw(self):
return Ranker.is_flush_draw(self.cards)
def is_straight_draw(self):
        return Ranker.is_straight_draw(self.cards)
class Ranker(object):
'''
Ranker class
'''
@staticmethod
def rank_five_cards(cards):
# List of all card values
values = sorted(['23456789TJQKA'.find(card.value) for card in cards])
# Checks if hand is a straight
is_straight = all([values[i] == values[0] + i for i in range(5)])
# Additional straight check
if not is_straight:
# Wheel
is_straight = all(values[i] == values[0] + i for i in range(4)) \
and values[4] == 12 \
and values[0] == 0
# Rotate values as the ace is weakest in this case
values = values[1:] + values[:1]
# Checks if hand is a flush
is_flush = all([card.suit == cards[0].suit for card in cards])
# Get card value counts
value_count = {value: values.count(value) for value in values}
# Sort value counts by most occuring
sorted_value_count = sorted([(count, value) for value, count in value_count.items()], reverse = True)
# Get all kinds (e.g. four of a kind, three of a kind, pair)
kinds = [value_count[0] for value_count in sorted_value_count]
# Get values for kinds
kind_values = [value_count[1] for value_count in sorted_value_count]
# Royal flush
if is_straight and is_flush and values[0] == 8:
return ['9'] + values
# Straight flush
if is_straight and is_flush:
return ['8'] + kind_values
# Four of a kind
if kinds[0] == 4:
return ['7'] + kind_values
# Full house
if kinds[0] == 3 and kinds[1] == 2:
return ['6'] + kind_values
# Flush
if is_flush:
return ['5'] + kind_values
# Straight
if is_straight:
return ['4'] + kind_values
# Three of a kind
if kinds[0] == 3:
return ['3'] + kind_values
# Two pair
if kinds[0] == 2 and kinds[1] == 2:
return ['2'] + kind_values
# Pair
if kinds[0] == 2:
return ['1'] + kind_values
# No pair
return ['0'] + kind_values
@staticmethod
def is_flush_draw(cards):
for i in range(0,5):
cards_ = cards[0:i]+cards[(i+1):]
same_suit = all([c.suit == cards_[0].suit for c in cards_])
if same_suit:
return True
return False
@staticmethod
def is_straight_draw(cards):
        # A straight draw: dropping one of the five cards leaves four consecutive
        # values (the ace may also play low, giving a wheel draw).
        values = sorted(['23456789TJQKA'.find(card.value) for card in cards])
        for i in range(0, 5):
            vals = values[0:i] + values[(i + 1):]
            is_consecutive = all(vals[j] == vals[0] + j for j in range(4))
            # Wheel draw: A-2-3-4
            is_wheel = vals[3] == 12 and all(vals[j] == j for j in range(3))
            if is_consecutive or is_wheel:
                return True
        return False
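# Illustrative usage sketch (added example, not part of the original bot): rank a
# sample five-card board with the classes above.
if __name__ == '__main__':
    sample = [Card('s', 'A'), Card('s', 'K'), Card('s', 'Q'), Card('s', 'J'), Card('s', 'T')]
    print(Hand(sample))  # a royal flush; its rank list starts with '9', the strongest category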
| mit | 5,522,409,518,547,737,000 | 27.873626 | 109 | 0.479163 | false | 3.881093 | false | false | false |
cmbclh/vnpy1.7 | vnpy/trader/app/login/uiLoginWidget.py | 1 | 7055 | # encoding: UTF-8
'''
GUI components for the login module
'''
import sys
sys.path.append('../')
#sys.path.append('D:\\tr\\vnpy-master\\vn.trader\\DAO')
sys.path.append('D:\\tr\\vnpy-1.7\\vnpy\\DAO')
sys.path.append('D:\\tr\\vnpy-1.7\\vnpy\\common')
import vnpy.DAO
import vnpy.common
from vnpy.DAO import *
import pandas as pd
import Tkinter
#from Tkinter import messagebox
from vnpy.trader.app.login.language import text
from vnpy.trader.uiBasicWidget import QtWidgets
TBUSER_COLUMNS = ['user_id','user_name','status','password','branch_no','open_date','cancel_date','passwd_date','op_group','op_rights','reserve1','dep_id','last_logon_date','last_logon_time','last_ip_address','fail_times','fail_date','reserve2','last_fail_ip']
########################################################################
class LoginSpinBox(QtWidgets.QLineEdit):#.QSpinBox):
"""调整参数用的数值框"""
#----------------------------------------------------------------------
def __init__(self, value):
"""Constructor"""
super(LoginSpinBox, self).__init__()
#self.setMinimum(0)
#self.setMaximum(1000000)
self.setText(value)
########################################################################
class LoginLine(QtWidgets.QFrame):
"""水平分割线"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(LoginLine, self).__init__()
self.setFrameShape(self.HLine)
self.setFrameShadow(self.Sunken)
########################################################################
class LoginEngineManager(QtWidgets.QWidget):
"""风控引擎的管理组件"""
#----------------------------------------------------------------------
def __init__(self, loginEngine, eventEngine, parent=None):
"""Constructor"""
super(LoginEngineManager, self).__init__(parent)
self.loginEngine = loginEngine
self.eventEngine = eventEngine
self.initUi()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
print self
self.setWindowTitle(text.LOGIN_MANAGER)
        # Build the form
self.userId = LoginSpinBox(self.loginEngine.userId)
self.password = LoginSpinBox(self.loginEngine.password)
buttonLogin = QtWidgets.QPushButton(text.LOGIN)
buttonLogout = QtWidgets.QPushButton(text.LOGOUT)
buttonSubmit = QtWidgets.QPushButton(text.SUBMIT)
Label = QtWidgets.QLabel
grid = QtWidgets.QGridLayout()
grid.addWidget(Label(text.USERID), 2, 0)
grid.addWidget(self.userId, 2, 1)
grid.addWidget(Label(text.PASSWORD), 3, 0)
grid.addWidget(self.password, 3, 1)
grid.addWidget(LoginLine(), 4, 0, 1, 2)
hbox = QtWidgets.QHBoxLayout()
hbox.addStretch()
hbox.addWidget(buttonSubmit)
hbox.addWidget(buttonLogin)
vbox = QtWidgets.QVBoxLayout()
vbox.addLayout(grid)
vbox.addLayout(hbox)
self.setLayout(vbox)
        # Connect widget signals
buttonSubmit.clicked.connect(self.submit)
buttonLogin.clicked.connect(self.login)
        # Use a fixed window size
self.setFixedSize(self.sizeHint())
# ----------------------------------------------------------------------
def login(self):
        print (u'Login check started: self.userId=%s, self.password=%s' % (self.userId, self.password))
userId = str(self.userId.text())
password = str(self.password.text())
        print (u'Login check started: userId=%s, password=%s' % (userId, password))
        # The query below returns at most one active user record
sql = ' SELECT *' \
' from tbuser where user_id = \'%s\' and password = \'%s\' and status = 0 ' % (userId, password)
try:
ret = vnpy.DAO.getDataBySQL('vnpy', sql)
if ret.empty :
                print (u'Login failed: user does not exist or the password is incorrect')
                #QtWidgets.QMessageBox.information(self, "Login failed", "User does not exist or the password is incorrect, please retry!", QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
QtWidgets.QMessageBox.information(self, text.LOGINERROR,text.LOGINERRORINFO,
QtWidgets.QMessageBox.Retry)
                #Tkinter.messagebox.showinfo('Login failed: user does not exist or the password is incorrect')
else:
                print (u'Login succeeded')
QtWidgets.QMessageBox.information(self, text.LOGINSUSS, text.LOGINSUSSINFO, QtWidgets.QMessageBox.Ok)
self.close()
                #Tkinter.messagebox.showinfo('Welcome')
except Exception as e:
print e
# ----------------------------------------------------------------------
def logout(self):
pass
# ----------------------------------------------------------------------
def submit(self):
userId = str(self.userId.text())
password = str(self.password.text())
        print (u'Registration check started: userId=%s, password=%s' % (userId, password))
        # The query below returns at most one user record for this id
sql = ' SELECT user_id,status' \
' from tbuser where user_id = \'%s\' ' % (userId)
try:
ret = vnpy.DAO.getDataBySQL('vnpy', sql)
            # If the user does not exist yet, insert a new registration record
if ret.empty:
                print (u'No existing user record, registering directly')
userData = [userId, userId, 0, password, '', 0, 0, 0, '', ' ', ' ', '', 0, 0, '', 0, 0, ' ', '']
d = pd.DataFrame([userData], columns=TBUSER_COLUMNS)
try:
print("开始写入TBUSER中")
vnpy.DAO.writeData('vnpy', 'tbuser', d)
                    print (u'Registration succeeded')
QtWidgets.QMessageBox.information(self, text.SUBMIT, text.SUBMITSUSS, QtWidgets.QMessageBox.Ok)
self.close()
except Exception as e1:
                    print (u'Registration failed')
QtWidgets.QMessageBox.information(self, text.SUBMIT, text.SUBMITFAIL, QtWidgets.QMessageBox.Retry)
print e1
            # If the user already exists, update status and password to reactivate the account
else:
                # Not implemented yet
QtWidgets.QMessageBox.information(self, text.SUBMIT, text.SUBMITFAIL, QtWidgets.QMessageBox.Ok)
self.close()
except Exception as e:
print e
#QtWidgets.QMessageBox.information(self, text.SUBMIT, text.SUBMITSUSS, QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
# ----------------------------------------------------------------------
def closeLoginEngineManager(self):
self.close()
pass | mit | 3,855,137,239,608,203,000 | 36.542857 | 260 | 0.507992 | false | 3.635307 | false | false | false |