repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
kfoss/neon | neon/datasets/sparsenet.py | 1 | 5254 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Sparsenet is the natural image dataset used by Olshausen and Field.
More info at: http://redwood.berkeley.edu/bruno/sparsenet/
"""
import logging
import os
import numpy
import pickle
from neon.util.compat import PY3, range
from neon.datasets.dataset import Dataset
if PY3:
from urllib.parse import urljoin as basejoin
else:
from urllib import basejoin
logger = logging.getLogger(__name__)
class SPARSENET(Dataset):
"""
Sets up a Sparsenet dataset.
Attributes:
raw_base_url (str): where to find the source data
raw_train_input_gz (str): URL of the full path to raw train inputs
raw_train_target_gz (str): URL of the full path to raw train targets
raw_test_input_gz (str): URL of the full path to raw test inputs
raw_test_target_gz (str): URL of the full path to raw test targets
backend (neon.backends.Backend): backend used for this data
inputs (dict): structure housing the loaded train/test/validation
input data
targets (dict): structure housing the loaded train/test/validation
target data
Kwargs:
repo_path (str, optional): where to locally host this dataset on disk
"""
raw_base_url = 'http://redwood.berkeley.edu/bruno/sparsenet/'
raw_train_whitened = basejoin(raw_base_url, 'IMAGES.mat')
raw_train_unwhitened = basejoin(raw_base_url, 'IMAGES_RAW.mat')
def __init__(self, **kwargs):
self.macro_batched = False
self.__dict__.update(kwargs)
def read_image_file(self, fname, dtype=None):
"""
Carries out the actual reading of Sparsenet image files.
"""
logger.info("in read_image_file, reading: %s", fname)
        with open(fname, 'rb') as infile:
            array = pickle.load(infile)
        return array
def load(self, backend=None, experiment=None):
"""
        Load the Sparsenet data, downloading and caching image patches as needed.
"""
import scipy.io
if 'repo_path' in self.__dict__:
self.repo_path = os.path.expandvars(os.path.expanduser(
self.repo_path))
save_dir = os.path.join(self.repo_path,
self.__class__.__name__)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
train_idcs = list(range(10000))
if 'sample_pct' in self.__dict__:
if self.sample_pct > 1.0:
self.sample_pct /= 100.0
if self.sample_pct < 1.0:
numpy.random.seed(self.backend.rng_seed)
numpy.random.shuffle(train_idcs)
train_idcs = train_idcs[0:int(10000 * self.sample_pct)]
for url in (self.raw_train_unwhitened, self.raw_train_whitened):
name = os.path.basename(url).rstrip('.mat')
repo_mat_file = os.path.join(save_dir, name + '.mat')
repo_file = repo_mat_file.rstrip('.mat')
# download and create dataset
if not os.path.exists(repo_file):
self.download_to_repo(url, save_dir)
infile = scipy.io.loadmat(repo_mat_file)
with open(repo_file, 'wb') as outfile:
data = infile[infile.keys()[0]]
# patches are extracted so they can be cached
# doing non-overlapping 16x16 patches (1024 per image)
patches = data.reshape(512/16, 16, 512/16, 16, 10)
patches = patches.transpose(1, 3, 0, 2, 4)
patches = patches.reshape(16, 16, 1024*10)
logger.info("Caching to pickle file: %s", outfile)
pickle.dump(patches, outfile)
outfile.close()
logger.info('loading: %s', name)
# load existing data
if 'IMAGES' in repo_file:
indat = self.read_image_file(repo_file, 'float32')
# flatten to 1D images
indat = indat.reshape((256, 10240)).transpose()[train_idcs]
self.inputs['train'] = indat
else:
logger.error('problems loading: %s', name)
self.format()
else:
raise AttributeError('repo_path not specified in config')
# TODO: try and download and read in directly?
| apache-2.0 | -2,070,562,520,108,554,800 | 41.032 | 79 | 0.557099 | false | 4.169841 | true | false | false |
wuher/devil | devil/util.py | 1 | 3000 | # -*- coding: utf-8 -*-
# util.py ---
#
# Created: Fri Dec 30 23:27:52 2011 (+0200)
# Author: Janne Kuuskeri
#
import re
charset_pattern = re.compile('.*;\s*charset=(.*)')
def camelcase_to_slash(name):
""" Converts CamelCase to camel/case
code ripped from http://stackoverflow.com/questions/1175208/does-the-python-standard-library-have-function-to-convert-camelcase-to-camel-cas
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1/\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1/\2', s1).lower()
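# Illustrative examples (added for clarity, not part of the original module):
#   camelcase_to_slash("CamelCase")    -> "camel/case"
#   camelcase_to_slash("HTTPResponse") -> "http/response"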
def strip_charset(content_type):
""" Strip charset from the content type string.
:param content_type: The Content-Type string (possibly with charset info)
:returns: The Content-Type string without the charset information
"""
return content_type.split(';')[0]
def extract_charset(content_type):
""" Extract charset info from content type.
E.g. application/json;charset=utf-8 -> utf-8
:param content_type: The Content-Type string (possibly with charset info)
:returns: The charset or ``None`` if not found.
"""
match = charset_pattern.match(content_type)
return match.group(1) if match else None
def get_charset(request):
""" Extract charset from the content type
"""
    content_type = request.META.get('CONTENT_TYPE', None)
    return extract_charset(content_type) if content_type else None
def parse_accept_header(accept):
""" Parse the Accept header
todo: memoize
:returns: list with pairs of (media_type, q_value), ordered by q
values.
"""
def parse_media_range(accept_item):
""" Parse media range and subtype """
return accept_item.split('/', 1)
def comparator(a, b):
""" Compare accept items a and b """
# first compare q values
result = -cmp(a[2], b[2])
        if result != 0:
# q values differ, no need to compare media types
return result
# parse media types and compare them (asterisks are lower in precedence)
mtype_a, subtype_a = parse_media_range(a[0])
mtype_b, subtype_b = parse_media_range(b[0])
if mtype_a == '*' and subtype_a == '*':
return 1
if mtype_b == '*' and subtype_b == '*':
return -1
if subtype_a == '*':
return 1
if subtype_b == '*':
return -1
return 0
if not accept:
return []
result = []
for media_range in accept.split(","):
parts = media_range.split(";")
media_type = parts.pop(0).strip()
media_params = []
q = 1.0
for part in parts:
(key, value) = part.lstrip().split("=", 1)
if key == "q":
q = float(value)
else:
media_params.append((key, value))
result.append((media_type, tuple(media_params), q))
result.sort(comparator)
return result
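# Illustrative example (added for clarity, not part of the original module):
# parse_accept_header("text/html;q=0.8, application/json") returns
# [('application/json', (), 1.0), ('text/html', (), 0.8)]; the JSON media type
# sorts first because of its higher q value.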
#
# util.py ends here
| mit | 3,430,907,829,229,489,700 | 25.086957 | 144 | 0.581667 | false | 3.667482 | false | false | false |
cclljj/AnySense_7688 | pending/pm_hpm.py | 1 | 3267 | import mraa
import time
from multiprocessing import Queue,Process
import move_avge
NUM_INCOME_BYTE = 8
CHAR_PRELIM = 0x40
NUM_DATA_BYTE = 7
CHECK_BYTE = 7
PM1_BYTE = -1
PM25_BYTE = 3
PM10_BYTE = 5
class sensor(Process):
def __init__(self, q):
Process.__init__(self)
self.q = q
self.u=mraa.Uart(0)
self.u.setBaudRate(9600)
self.u.setMode(8, mraa.UART_PARITY_NONE, 1)
self.u.setFlowcontrol(False, False)
self.u.flush()
cmd = bytearray([0x68,0x01,0x02,0x95])
#cmd = bytearray([0x68,0x01,0x04,0x96])
self.u.write(cmd)
self.u.flush()
time.sleep(0.1)
if self.u.dataAvailable():
ready = False
while ready is False:
getstr = self.u.readStr(2)
bytedata = bytearray(getstr)
if bytedata[0]==165 and bytedata[1]==165:
ready = True
else:
time.sleep(0.1)
self.u.flush()
cmd = bytearray([0x68,0x01,0x01,0x96])
self.u.write(cmd)
self.u.flush()
time.sleep(0.1)
if self.u.dataAvailable():
ready = False
while ready is False:
getstr = self.u.readStr(2)
bytedata = bytearray(getstr)
for i in range (0,2,1):
print (int)(bytedata[i])
if bytedata[0]==165 and bytedata[1]==165:
ready = True
else:
time.sleep(0.1)
self.u.flush()
self.pm1_0_avg = move_avge.move_avg(1)
self.pm2_5_avg = move_avge.move_avg(1)
self.pm10_avg = move_avge.move_avg(1)
def data_log(self, dstr):
bytedata = bytearray(dstr)
if self.checksum(dstr) is True:
PM1_0 = -1
PM2_5 = bytedata[PM25_BYTE]*256 + bytedata[PM25_BYTE+1]
PM10 = bytedata[PM10_BYTE]*256 + bytedata[PM10_BYTE+1]
self.pm1_0_avg.add(PM1_0)
self.pm2_5_avg.add(PM2_5)
self.pm10_avg.add(PM10)
return True
else:
return False
def checksum(self, dstr):
bytedata = bytearray(dstr)
if bytedata[0]!=64 or bytedata[1]!=5 or bytedata[2]!=4:
return False
calcsum = 0
calcsum = bytedata[0] + bytedata[1] + bytedata[2] + 256 * bytedata[3] + bytedata[4] + 256 * bytedata[5] + bytedata[6]
calcsum = (65536 - calcsum) % 256
exptsum = bytedata[CHECK_BYTE]
if calcsum==exptsum:
return True
else:
return False
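	# Worked example for checksum() above (added for clarity; values are illustrative,
	# not taken from the sensor datasheet): for a frame
	# 0x40 0x05 0x04 0x00 0x23 0x00 0x28 0x6C (PM2.5 = 35, PM10 = 40),
	# calcsum = 64+5+4+0+35+0+40 = 148 and (65536 - 148) % 256 = 108 = 0x6C,
	# which matches the trailing check byte, so checksum() returns True.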
def get_data(self):
PM1_0 = self.pm1_0_avg.get()
PM2_5 = self.pm2_5_avg.get()
PM10 = self.pm10_avg.get()
ret = {
'PM1.0': PM1_0,
'PM2.5': PM2_5,
'PM10': PM10
}
return ret
def run(self):
count = 0
while True:
self.u.flush()
cmd = bytearray([0x68,0x01,0x04,0x93])
self.u.write(cmd)
self.u.flush()
time.sleep(1)
if self.u.dataAvailable():
getstr = self.u.readStr(NUM_INCOME_BYTE)
if len(getstr) == NUM_INCOME_BYTE:
if self.data_log(getstr) is True:
g = self.get_data()
self.q.put(g)
if __name__ == '__main__':
q = Queue()
p = sensor(q)
p.start()
while True:
print('air: '+ str(q.get()))
| gpl-3.0 | 4,554,266,414,521,395,700 | 23.380597 | 119 | 0.53015 | false | 2.65394 | false | false | false |
Larisa123/Kviz | main.py | 1 | 12835 | from tkinter import *
from tkinter import ttk
import random
button_width = 17
number_of_characters_per_row = 56
diff_for_answers = 8
color = '#%02x%02x%02x' % (231, 231, 231)
import subprocess # poskusile 5 razlicnih modulov: pyglet, mp3play, sound in se dva pa noben ni delal
# pygame se nama zdi prevelika knjiznica za dodati za samo nekaj zvokov
def play_button_click(): # dela samo na OS X!
subprocess.call(["afplay", "Sounds/button_click.mp3"])
# dela prepočasi!! - ko to dela, ne dela nič drugo!
# subprocess.call(["afplay", "music.mp3"]) # ce to igram, potem nic drugo ne dela dokler se glasba ne konca!
import gettext
_ = gettext.gettext
# noinspection PyBroadException
try:
en = gettext.translation('main', localedir='locale', languages=['en'])
en.install()
except:
print(_("Prevedba v angleski jezik ni bila mogoca."))
class Quiz(Tk):
frames = {}
number_of_questions = 5
question_count = 0
number_of_all_questions = 20 # per subject in SUBJECTdata.txt
points = 0 # number of points user gets for answering the question correctly
def __init__(self, *args, **kwargs):
Tk.__init__(self, *args, **kwargs)
Tk.wm_title(self, _("Maturitetni kviz"))
self.initialize_container_frame()
self.initialize_start_page()
self.set_images()
def initialize_container_frame(self):
self.container = ttk.Frame(self) # to je frame, ki nima na sebi nič, na njega zlagama nove
self.container.pack_propagate(0)
self.container.pack(pady=10, padx=10)
self.container.grid_rowconfigure(0, weight=1)
# default weight je 0, kar pomeni da bo ta imel najvecji prostor ko spremenimo velikost - zaenkrat nima veze ker je sam
self.container.grid_columnconfigure(0, weight=1)
def initialize_start_page(self):
start_page = StartPage(self.container, self)
start_page.grid(row=0, column=0, sticky="nsew")
self.frames[0] = start_page
self.show_frame()
def show_frame(self):
if self.question_count <= self.number_of_questions:
frame = self.frames.get(self.question_count, None) # da slucajno ne pride do zrusitve programa
if frame is not None:
frame.tkraise() # naloži nov frame - vprašanje
else:
print(_("Nekaj se je zalomilo. Vprasanja ni bilo mogoče naložiti"))
self.question_count += 1
else:
self.show_result_frame()
def set_subject(self, subject):
self.create_random_questions(subject)
self.show_frame()
play_button_click()
def create_random_questions(self, subject):
random_question_numbers = []
table_of_possible_question_numbers = list(
range(1, self.number_of_all_questions + 1)) # iti more od 1 do vkljucno stevila
# tu samo dolocimo random stevilke vprasanj, stevilka pomeni vrstica v dokumentu:
while len(random_question_numbers) < self.number_of_questions:
rand_number = random.choice(table_of_possible_question_numbers)
random_question_numbers.append(rand_number)
if rand_number in table_of_possible_question_numbers:
table_of_possible_question_numbers.remove(rand_number)
else:
print(_("Pri določanju tvojih vprašanj se je zalomilo.")) # spet da slucajno ne pride do zrusitve
# nalozimo dejanska vprasanja, prikazemo zaenkrat se nobenega:
question_count = 1 # to ni lastnost metode self.question_count, ampak nova spremenljivka
for number in random_question_numbers:
question = Question(self.container, self, subject, number)
self.frames[question_count] = question
question_count += 1
question.grid(row=0, column=0, sticky="nsew")
def show_result_frame(self):
result_page = ResultPage(self.container, self)
result_page.grid(row=0, column=0, sticky="nsew")
result_page.tkraise()
# ponastavimo rezultate, ce bo slucajno igral ponovno:
self.question_count = 0
self.points = 0
self.destroy_previous_frames() # da se nam spomin ne zabase
def destroy_previous_frames(self):
for frame in self.frames.values():
frame.destroy()
self.frames = {}
def increase_points(self):
self.points += 1
def set_images(self):
correct_photo = PhotoImage(file="Images/correct.gif")
Label(self, image=correct_photo)
self.correct_photo = correct_photo
wrong_photo = wrong_photo = PhotoImage(file="Images/wrong.gif")
Label(self, image=wrong_photo)
self.wrong_photo = wrong_photo
class StartPage(ttk.Frame): # podeduje metode in lastnosti razreda
def __init__(self, parent, quiz_reference): # self je container - vse se bo nalagalo na container
ttk.Frame.__init__(self, parent)
self.quiz_reference = quiz_reference
self.show_frame()
def show_frame(self):
text = _('''Pozdravljen bodoči maturant!\nPred tabo je kratek kviz iz maturitetnih predmetov\n''')
ttk.Label(self, text=text, justify="center").pack(padx=10)
self.show_image()
ttk.Label(self, text=_("Izberi področje:")).pack(pady=10, padx=10)
button_geo = ttk.Button(self, text=_("Geografija"),
command=lambda: self.quiz_reference.set_subject("GEO"),
width=button_width)
button_geo.pack(side="bottom")
button_mat = ttk.Button(self, text=_("Matematika"),
command=lambda: self.quiz_reference.set_subject("MAT"),
width=button_width)
button_mat.pack(side="bottom")
# lambda uporabimo, da lahko podamo parameter in ob tem ne sprožimo klica funkcije
def show_image(self):
photo = PhotoImage(file="Images/slika.gif")
label = ttk.Label(self, image=photo)
self.start_page_image = photo # treba je imeti se eno povezavo, zato da je avtomatsko ne izbrise
label.pack()
class Question(ttk.Frame):
question = ""
correct_answer = 0
possible_answers = {}
chosen_answer = ""
is_confirm_button_showing = False
radio_buttons = []
def __init__(self, parent, quiz_reference, subject, number): # ko imama stevilko, poiscema vprasanje, odgovor in mozne odgovore iz datoteke
ttk.Frame.__init__(self, parent)
self.quiz_reference = quiz_reference
self.subject = subject
self.number = number
self.get_data()
self.show_frame_widgets()
def show_frame_widgets(self):
self.show_the_question()
self.show_possible_answers()
def show_the_question(self):
'''prikaze vprasanje na label widgetu'''
edited_text = self.check_if_text_too_long(self.question, number_of_characters_per_row)
ttk.Label(self, text=edited_text).pack(pady=15, padx=10, side="top")
def check_if_text_too_long(self, unedited_text, allowed_number_of_chars):
'''vrne primerno preurejen text z novimi vrsticami, ce je trenutno predolg'''
if len(unedited_text) <= number_of_characters_per_row: return unedited_text # je ze ok
text = '''''' # vecvrsticni string
num_of_chars = 0 # in current row
for word in unedited_text.split(" "):
num_of_chars += len(word)
if num_of_chars < allowed_number_of_chars:
text += word + " "
else:
text = text + word + "\n"
num_of_chars = 0
return text.strip("\n")
def show_possible_answers(self):
self.radio_buttons = {}
self.var = StringVar()
for possible_answer in self.possible_answers:
possible_answer = self.check_if_text_too_long(possible_answer,
number_of_characters_per_row - diff_for_answers)
R = ttk.Radiobutton(self,
compound="left",
text=possible_answer,
variable=self.var,
value=possible_answer,
command=self.set_chosen_answer)
# Ko uporabnik izbere odgovor, se mu prikaze gumb za potrditev, ko stisne nanj se preveri pravilnost izbire
self.radio_buttons[possible_answer] = R
R.pack(anchor='w')
def set_chosen_answer(self):
if not self.is_confirm_button_showing: self.show_confirm_button()
def show_confirm_button(self):
self.confirm_button = ttk.Button(self, text=_("Potrdi izbiro"),
command=self.check_the_answer,
width=button_width)
self.confirm_button.pack(pady=8, side="bottom")
self.is_confirm_button_showing = True
def change_text_on_confirm_button(self):
self.confirm_button.destroy()
self.next_q_button = ttk.Button(self, text=_("Naprej"),
command=self.confirm_button_pressed,
width=button_width)
self.next_q_button.pack(pady=8, side="bottom")
# prepreci da stisne na gumbe:
for text, radio_button in self.radio_buttons.items():
radio_button.configure(state=DISABLED)
#if radio_button.text == self.chosen_answer: print(self.chosen_answer) # to ne dela! zato je narejeno z slovarjem
if text == self.chosen_answer:
appropriate_image = self.quiz_reference.correct_photo if self.chosen_answer == self.correct_answer \
else self.quiz_reference.wrong_photo
#print(appropriate_image.name)
#radio_button.configure(image=appropriate_image) # TU BI SE MORALA PRIKAZATI ZRAVEN PRIMERNA SLIKA
def confirm_button_pressed(self):
play_button_click()
self.quiz_reference.show_frame()
def check_the_answer(self):
self.chosen_answer = self.var.get()
if self.chosen_answer == self.correct_answer: self.quiz_reference.increase_points()
self.change_text_on_confirm_button()
play_button_click()
def get_data(self):
data = self.subject + "data.txt"
with open(data, "r") as file:
lines = [line.strip() for line in file]
currentLine = lines[self.number]
# zapisano v obliki Vprasanje;odg1:odg2:odg3;odgovorPravilen
data = currentLine.split(";")
self.question = data[0]
self.correct_answer = data[2]
self.possible_answers = data[1].split(":")
class ResultPage(ttk.Frame):
def __init__(self, parent, quiz_reference): # ko imama stevilko, poiscema vprasanje, odgovor in mozne odgovore iz datoteke
ttk.Frame.__init__(self, parent)
self.quiz_reference = quiz_reference
self.show_frame_widgets()
def show_frame_widgets(self):
points = self.quiz_reference.points
all_points = self.quiz_reference.number_of_questions
ttk.Label(self, text="Tvoj rezultat je: {} od {} točk!".
format(points, all_points)).pack(pady=10, padx=10)
text_message = self.appropriate_message(points)
ttk.Label(self, text=text_message).pack(pady=10, padx=10)
appropriate_image = "Images/failure.gif" if points <= all_points // 2 else "Images/bravo.gif"
photo = PhotoImage(file=appropriate_image)
label = ttk.Label(self, image=photo)
self.congratulation_photo = photo
label.pack(pady=15)
ttk.Button(self, text="Igraj ponovno!",
command=self.quiz_reference.initialize_start_page,
width=button_width).pack(side="bottom")
def appropriate_message(self, user_points):
"""Prikaze sporocilo glede na rezultat"""
all_points = self.quiz_reference.number_of_questions
if user_points in range(all_points // 2 + 1):
message = "Tvoje znanje je nezadostno!"
elif user_points in range(all_points // 2 + 1, all_points // 4):
message = "Tvoje znanje je zadovoljivo."
elif user_points in range(all_points // 4, all_points):
message = "Čestitam, dosegel si skoraj vse točke!"
else:
message = "Bravo, tvoje znanje je izjemno!!!" # dosegel je vse točke
return message
app = Quiz()
app.geometry("500x250")
app.configure(bg=color) # sicer bi bil rob beli
# velikost okna - to ni resitev, hocem nastavit velikost vseh framov, ne samo okna, ker se zdaj čudno poravnava
app.resizable(0, 0) # v nobeno smer ni resizable
app.mainloop()
| apache-2.0 | 391,766,898,711,221,760 | 39.05625 | 144 | 0.61359 | false | 3.4 | false | false | false |
geometalab/OSMTagFinder | OSMTagFinder/thesaurus/mapsemnet.py | 1 | 1602 | # -*- coding: utf-8 -*-
'''
Created on 08.11.2014
@author: Simon Gwerder
'''
from rdflib.namespace import SKOS
from semnet.osmsemanticnet import OSMSemanticNet
from thesaurus.rdfgraph import RDFGraph
from utilities.configloader import ConfigLoader
class MapOSMSemanticNet:
def __init__(self, tagFinderRDF, osnSemNetFilePath=None):
if tagFinderRDF is None: return
osnSemNetRDF = None
if osnSemNetFilePath is not None:
#print('Loading OSN graph')
osnSemNetRDF = RDFGraph(osnSemNetFilePath)
osn = OSMSemanticNet(osnSemNetRDF) # if osnSemNetRDF is None it will check the web graph
termSchemeName = ConfigLoader().getThesaurusString('TERM_SCHEME_NAME')
count = 0
for subject, predicate, obj in tagFinderRDF.graph:
if not osn.baseUrl in subject and not termSchemeName in subject: # check if some osn matches have been added already
osnConcept = None
if predicate == SKOS.prefLabel:
count = count + 1
if '=' in str(obj):
splitArray = str(obj).split('=')
osnConcept = osn.getConcept(splitArray[0], splitArray[1])
else:
osnConcept = osn.getConcept(str(obj))
if osnConcept:
tagFinderRDF.addRelatedMatch(subject, osnConcept)
#print(str(count) + ' : Added Matching Concept Mapping from: ' + subject + '\t\t\tto: ' + osnConcept)
#tagFinderRDF.serialize(tagFinderRDF.filePath)
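# Illustrative usage (added for clarity; the file names are assumptions, not from the project):
#   tagfinder_rdf = RDFGraph('tagfinder_thesaurus.rdf')
#   MapOSMSemanticNet(tagfinder_rdf, osnSemNetFilePath='osn_semantic_network.rdf')
# walks every skos:prefLabel triple in the TagFinder graph and adds a related-match
# link for each concept that the OSM Semantic Network also defines.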
| mit | -4,895,832,965,683,772,000 | 32.375 | 128 | 0.611735 | false | 3.814286 | false | false | false |
bellowsj/aiopogo | aiopogo/pogoprotos/settings/master/quest_settings_pb2.py | 1 | 3401 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/settings/master/quest_settings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.enums import quest_type_pb2 as pogoprotos_dot_enums_dot_quest__type__pb2
from pogoprotos.settings.master.quest import daily_quest_settings_pb2 as pogoprotos_dot_settings_dot_master_dot_quest_dot_daily__quest__settings__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/settings/master/quest_settings.proto',
package='pogoprotos.settings.master',
syntax='proto3',
serialized_pb=_b('\n/pogoprotos/settings/master/quest_settings.proto\x12\x1apogoprotos.settings.master\x1a!pogoprotos/enums/quest_type.proto\x1a;pogoprotos/settings/master/quest/daily_quest_settings.proto\"\x8b\x01\n\rQuestSettings\x12/\n\nquest_type\x18\x01 \x01(\x0e\x32\x1b.pogoprotos.enums.QuestType\x12I\n\x0b\x64\x61ily_quest\x18\x02 \x01(\x0b\x32\x34.pogoprotos.settings.master.quest.DailyQuestSettingsb\x06proto3')
,
dependencies=[pogoprotos_dot_enums_dot_quest__type__pb2.DESCRIPTOR,pogoprotos_dot_settings_dot_master_dot_quest_dot_daily__quest__settings__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_QUESTSETTINGS = _descriptor.Descriptor(
name='QuestSettings',
full_name='pogoprotos.settings.master.QuestSettings',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='quest_type', full_name='pogoprotos.settings.master.QuestSettings.quest_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='daily_quest', full_name='pogoprotos.settings.master.QuestSettings.daily_quest', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=176,
serialized_end=315,
)
_QUESTSETTINGS.fields_by_name['quest_type'].enum_type = pogoprotos_dot_enums_dot_quest__type__pb2._QUESTTYPE
_QUESTSETTINGS.fields_by_name['daily_quest'].message_type = pogoprotos_dot_settings_dot_master_dot_quest_dot_daily__quest__settings__pb2._DAILYQUESTSETTINGS
DESCRIPTOR.message_types_by_name['QuestSettings'] = _QUESTSETTINGS
QuestSettings = _reflection.GeneratedProtocolMessageType('QuestSettings', (_message.Message,), dict(
DESCRIPTOR = _QUESTSETTINGS,
__module__ = 'pogoprotos.settings.master.quest_settings_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.settings.master.QuestSettings)
))
_sym_db.RegisterMessage(QuestSettings)
# @@protoc_insertion_point(module_scope)
| mit | 2,925,874,700,893,986,300 | 40.987654 | 424 | 0.757424 | false | 3.09745 | false | true | false |
seung-lab/cloud-volume | cloudvolume/storage/storage_interfaces.py | 1 | 15070 | import six
from collections import defaultdict
import json
import os.path
import posixpath
import re
import boto3
import botocore
from glob import glob
import google.cloud.exceptions
from google.cloud.storage import Batch, Client
import requests
import tenacity
from cloudvolume.connectionpools import S3ConnectionPool, GCloudBucketPool
from cloudvolume.lib import mkdir
from cloudvolume.exceptions import UnsupportedCompressionType
COMPRESSION_EXTENSIONS = ('.gz', '.br')
# This is just to support pooling by bucket
class keydefaultdict(defaultdict):
def __missing__(self, key):
if self.default_factory is None:
raise KeyError( key )
else:
ret = self[key] = self.default_factory(key)
return ret
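# Note (added for clarity): unlike a plain collections.defaultdict, keydefaultdict passes
# the missing key to the factory, so S3_POOL['s3']['my-bucket'] below lazily creates a
# connection pool bound to that particular service/bucket pair ('my-bucket' is only an
# illustrative name).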
S3_POOL = None
GC_POOL = None
def reset_connection_pools():
global S3_POOL
global GC_POOL
S3_POOL = keydefaultdict(lambda service: keydefaultdict(lambda bucket_name: S3ConnectionPool(service, bucket_name)))
GC_POOL = keydefaultdict(lambda bucket_name: GCloudBucketPool(bucket_name))
reset_connection_pools()
retry = tenacity.retry(
reraise=True,
stop=tenacity.stop_after_attempt(7),
wait=tenacity.wait_random_exponential(0.5, 60.0),
)
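# Note (added for clarity): with these tenacity settings each wrapped call is attempted
# up to 7 times, waiting a random exponential backoff (0.5 s multiplier, capped at 60 s)
# between attempts; reraise=True means the original exception is raised if every attempt fails.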
class StorageInterface(object):
def release_connection(self):
pass
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.release_connection()
class FileInterface(StorageInterface):
def __init__(self, path):
super(StorageInterface, self).__init__()
self._path = path
def get_path_to_file(self, file_path):
return os.path.join(
self._path.basepath, self._path.layer, file_path
)
def put_file(
self, file_path, content,
content_type, compress,
cache_control=None
):
path = self.get_path_to_file(file_path)
mkdir(os.path.dirname(path))
# keep default as gzip
if compress == "br":
path += ".br"
elif compress:
path += '.gz'
if content \
and content_type \
and re.search('json|te?xt', content_type) \
and type(content) is str:
content = content.encode('utf-8')
try:
with open(path, 'wb') as f:
f.write(content)
except IOError as err:
with open(path, 'wb') as f:
f.write(content)
def get_file(self, file_path, start=None, end=None):
path = self.get_path_to_file(file_path)
if os.path.exists(path + '.gz'):
encoding = "gzip"
path += '.gz'
elif os.path.exists(path + '.br'):
encoding = "br"
path += ".br"
else:
encoding = None
try:
with open(path, 'rb') as f:
if start is not None:
f.seek(start)
if end is not None:
start = start if start is not None else 0
num_bytes = end - start
data = f.read(num_bytes)
else:
data = f.read()
return data, encoding
except IOError:
return None, encoding
def exists(self, file_path):
path = self.get_path_to_file(file_path)
return os.path.exists(path) or any(( os.path.exists(path + ext) for ext in COMPRESSION_EXTENSIONS ))
def files_exist(self, file_paths):
return { path: self.exists(path) for path in file_paths }
def delete_file(self, file_path):
path = self.get_path_to_file(file_path)
if os.path.exists(path):
os.remove(path)
elif os.path.exists(path + '.gz'):
os.remove(path + '.gz')
elif os.path.exists(path + ".br"):
os.remove(path + ".br")
def delete_files(self, file_paths):
for path in file_paths:
self.delete_file(path)
def list_files(self, prefix, flat):
"""
List the files in the layer with the given prefix.
flat means only generate one level of a directory,
while non-flat means generate all file paths with that
prefix.
"""
layer_path = self.get_path_to_file("")
path = os.path.join(layer_path, prefix) + '*'
filenames = []
remove = layer_path
if len(remove) and remove[-1] != '/':
remove += '/'
if flat:
for file_path in glob(path):
if not os.path.isfile(file_path):
continue
filename = file_path.replace(remove, '')
filenames.append(filename)
else:
subdir = os.path.join(layer_path, os.path.dirname(prefix))
for root, dirs, files in os.walk(subdir):
files = [ os.path.join(root, f) for f in files ]
files = [ f.replace(remove, '') for f in files ]
files = [ f for f in files if f[:len(prefix)] == prefix ]
for filename in files:
filenames.append(filename)
def stripext(fname):
(base, ext) = os.path.splitext(fname)
if ext in COMPRESSION_EXTENSIONS:
return base
else:
return fname
filenames = list(map(stripext, filenames))
return _radix_sort(filenames).__iter__()
class GoogleCloudStorageInterface(StorageInterface):
def __init__(self, path):
super(StorageInterface, self).__init__()
global GC_POOL
self._path = path
self._bucket = GC_POOL[path.bucket].get_connection()
def get_path_to_file(self, file_path):
return posixpath.join(self._path.no_bucket_basepath, self._path.layer, file_path)
@retry
def put_file(self, file_path, content, content_type, compress, cache_control=None):
key = self.get_path_to_file(file_path)
blob = self._bucket.blob( key )
    # brotli is disabled on Google Cloud Storage until Content-Encoding handling works
if compress == "br":
raise UnsupportedCompressionType("Brotli unsupported on google cloud storage")
elif compress:
blob.content_encoding = "gzip"
if cache_control:
blob.cache_control = cache_control
blob.upload_from_string(content, content_type)
@retry
def get_file(self, file_path, start=None, end=None):
key = self.get_path_to_file(file_path)
blob = self._bucket.blob( key )
if start is not None:
start = int(start)
if end is not None:
end = int(end - 1)
try:
# blob handles the decompression so the encoding is None
return blob.download_as_bytes(start=start, end=end), None # content, encoding
except google.cloud.exceptions.NotFound as err:
return None, None
@retry
def exists(self, file_path):
key = self.get_path_to_file(file_path)
blob = self._bucket.blob(key)
return blob.exists()
def files_exist(self, file_paths):
result = {path: None for path in file_paths}
MAX_BATCH_SIZE = Batch._MAX_BATCH_SIZE
for i in range(0, len(file_paths), MAX_BATCH_SIZE):
# Retrieve current batch of blobs. On Batch __exit__ it will populate all
# future responses before raising errors about the (likely) missing keys.
try:
with self._bucket.client.batch():
for file_path in file_paths[i:i+MAX_BATCH_SIZE]:
key = self.get_path_to_file(file_path)
result[file_path] = self._bucket.get_blob(key)
except google.cloud.exceptions.NotFound as err:
pass # Missing keys are expected
for file_path, blob in result.items():
# Blob exists if ``dict``, missing if ``_FutureDict``
result[file_path] = isinstance(blob._properties, dict)
return result
@retry
def delete_file(self, file_path):
key = self.get_path_to_file(file_path)
try:
self._bucket.delete_blob( key )
except google.cloud.exceptions.NotFound:
pass
def delete_files(self, file_paths):
MAX_BATCH_SIZE = Batch._MAX_BATCH_SIZE
for i in range(0, len(file_paths), MAX_BATCH_SIZE):
try:
with self._bucket.client.batch():
for file_path in file_paths[i : i + MAX_BATCH_SIZE]:
key = self.get_path_to_file(file_path)
self._bucket.delete_blob(key)
except google.cloud.exceptions.NotFound:
pass
@retry
def list_files(self, prefix, flat=False):
"""
List the files in the layer with the given prefix.
flat means only generate one level of a directory,
while non-flat means generate all file paths with that
prefix.
"""
layer_path = self.get_path_to_file("")
path = posixpath.join(layer_path, prefix)
for blob in self._bucket.list_blobs(prefix=path):
filename = blob.name.replace(layer_path, '')
if not filename:
continue
elif not flat and filename[-1] != '/':
yield filename
elif flat and '/' not in blob.name.replace(path, ''):
yield filename
def release_connection(self):
global GC_POOL
GC_POOL[self._path.bucket].release_connection(self._bucket)
class HttpInterface(StorageInterface):
def __init__(self, path):
super(StorageInterface, self).__init__()
self._path = path
def get_path_to_file(self, file_path):
path = posixpath.join(
self._path.basepath, self._path.layer, file_path
)
return self._path.protocol + '://' + path
# @retry
def delete_file(self, file_path):
raise NotImplementedError()
def delete_files(self, file_paths):
raise NotImplementedError()
# @retry
def put_file(self, file_path, content, content_type, compress, cache_control=None):
raise NotImplementedError()
@retry
def get_file(self, file_path, start=None, end=None):
key = self.get_path_to_file(file_path)
if start is not None or end is not None:
start = int(start) if start is not None else ''
end = int(end - 1) if end is not None else ''
headers = { "Range": "bytes={}-{}".format(start, end) }
resp = requests.get(key, headers=headers)
else:
resp = requests.get(key)
if resp.status_code in (404, 403):
return None, None
resp.raise_for_status()
if 'Content-Encoding' not in resp.headers:
return resp.content, None
# requests automatically decodes these
elif resp.headers['Content-Encoding'] in ('', 'gzip', 'deflate', 'br'):
return resp.content, None
else:
return resp.content, resp.headers['Content-Encoding']
@retry
def exists(self, file_path):
key = self.get_path_to_file(file_path)
resp = requests.get(key, stream=True)
resp.close()
return resp.ok
def files_exist(self, file_paths):
return {path: self.exists(path) for path in file_paths}
def list_files(self, prefix, flat=False):
raise NotImplementedError()
class S3Interface(StorageInterface):
def __init__(self, path):
super(StorageInterface, self).__init__()
global S3_POOL
self._path = path
self._conn = S3_POOL[path.protocol][path.bucket].get_connection()
def get_path_to_file(self, file_path):
return posixpath.join(self._path.no_bucket_basepath, self._path.layer, file_path)
@retry
def put_file(self, file_path, content, content_type, compress, cache_control=None, ACL="bucket-owner-full-control"):
key = self.get_path_to_file(file_path)
attrs = {
'Bucket': self._path.bucket,
'Body': content,
'Key': key,
'ContentType': (content_type or 'application/octet-stream'),
'ACL': ACL,
}
# keep gzip as default
if compress == "br":
attrs['ContentEncoding'] = 'br'
elif compress:
attrs['ContentEncoding'] = 'gzip'
if cache_control:
attrs['CacheControl'] = cache_control
self._conn.put_object(**attrs)
@retry
def get_file(self, file_path, start=None, end=None):
"""
    There are many types of exceptions which can get raised
from this method. We want to make sure we only return
None when the file doesn't exist.
"""
kwargs = {}
if start is not None or end is not None:
start = int(start) if start is not None else ''
end = int(end - 1) if end is not None else ''
kwargs['Range'] = "bytes={}-{}".format(start, end)
try:
resp = self._conn.get_object(
Bucket=self._path.bucket,
Key=self.get_path_to_file(file_path),
**kwargs
)
encoding = ''
if 'ContentEncoding' in resp:
encoding = resp['ContentEncoding']
return resp['Body'].read(), encoding
except botocore.exceptions.ClientError as err:
if err.response['Error']['Code'] == 'NoSuchKey':
return None, None
else:
raise
def exists(self, file_path):
exists = True
try:
self._conn.head_object(
Bucket=self._path.bucket,
Key=self.get_path_to_file(file_path),
)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
exists = False
else:
raise
return exists
def files_exist(self, file_paths):
return {path: self.exists(path) for path in file_paths}
@retry
def delete_file(self, file_path):
# Not necessary to handle 404s here.
# From the boto3 documentation:
# delete_object(**kwargs)
# Removes the null version (if there is one) of an object and inserts a delete marker,
# which becomes the latest version of the object. If there isn't a null version,
# Amazon S3 does not remove any objects.
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.delete_object
self._conn.delete_object(
Bucket=self._path.bucket,
Key=self.get_path_to_file(file_path),
)
def delete_files(self, file_paths):
for path in file_paths:
self.delete_file(path)
def list_files(self, prefix, flat=False):
"""
List the files in the layer with the given prefix.
flat means only generate one level of a directory,
while non-flat means generate all file paths with that
prefix.
"""
layer_path = self.get_path_to_file("")
path = posixpath.join(layer_path, prefix)
@retry
def s3lst(continuation_token=None):
kwargs = {
'Bucket': self._path.bucket,
'Prefix': path,
}
if continuation_token:
kwargs['ContinuationToken'] = continuation_token
return self._conn.list_objects_v2(**kwargs)
resp = s3lst()
def iterate(resp):
if 'Contents' not in resp.keys():
resp['Contents'] = []
for item in resp['Contents']:
key = item['Key']
filename = key.replace(layer_path, '')
if not flat and filename[-1] != '/':
yield filename
elif flat and '/' not in key.replace(path, ''):
yield filename
for filename in iterate(resp):
yield filename
while resp['IsTruncated'] and resp['NextContinuationToken']:
resp = s3lst(resp['NextContinuationToken'])
for filename in iterate(resp):
yield filename
def release_connection(self):
global S3_POOL
S3_POOL[self._path.protocol][self._path.bucket].release_connection(self._conn)
def _radix_sort(L, i=0):
"""
Most significant char radix sort
"""
if len(L) <= 1:
return L
done_bucket = []
buckets = [ [] for x in range(255) ]
for s in L:
if i >= len(s):
done_bucket.append(s)
else:
buckets[ ord(s[i]) ].append(s)
buckets = [ _radix_sort(b, i + 1) for b in buckets ]
return done_bucket + [ b for blist in buckets for b in blist ] | bsd-3-clause | 6,737,976,082,698,986,000 | 27.76145 | 118 | 0.633046 | false | 3.628702 | false | false | false |
SCPR/kpcc_backroom_handshakes | measure_finance/models.py | 1 | 7431 | from django.conf import settings
from django.db import models
from django.utils.encoding import smart_str
from django.utils import timezone
from django.template.defaultfilters import slugify
from django.shortcuts import get_object_or_404
from election_registrar import models as registrar
import logging
import time
import datetime
logger = logging.getLogger("kpcc_backroom_handshakes")
class Measure(models.Model):
election = models.ForeignKey(registrar.Election, null=True)
measure_id = models.IntegerField("", null=True, blank=True)
official_identifier = models.CharField("official_identifier", max_length=255, null=True, blank=True)
official_identifier_slug = models.SlugField("official_identifier_slug", max_length=140, null=True, blank=True)
topic = models.CharField("", max_length=255, null=True, blank=True)
official_title = models.CharField("", max_length=255, null=True, blank=True)
official_short_summary = models.TextField(" ", null=True, blank=True)
official_summary = models.TextField(" ", null=True, blank=True)
official_summary_author = models.CharField("", max_length=255, null=True, blank=True)
official_yes_vote_means = models.TextField(" ", null=True, blank=True)
official_no_vote_means = models.TextField(" ", null=True, blank=True)
official_vote_means_source = models.CharField("", max_length=255, null=True, blank=True)
official_financial_effect = models.TextField(" ", null=True, blank=True)
official_financial_effect_author = models.CharField("", max_length=255, null=True, blank=True)
official_impartial_analysis = models.TextField(" ", null=True, blank=True)
official_impartial_analysis_author = models.CharField("", max_length=255, null=True, blank=True)
# official_background = models.TextField(" ", null=True, blank=True)
# official_background_author = models.CharField("", max_length=255, null=True, blank=True)
official_tax_rate = models.CharField("", max_length=255, null=True, blank=True)
official_tax_rate_author = models.CharField("", max_length=255, null=True, blank=True)
official_short_arguments_yes = models.TextField(" ", null=True, blank=True)
official_short_arguments_no = models.TextField(" ", null=True, blank=True)
official_short_arguments_source = models.CharField("", max_length=255, null=True, blank=True)
# official_arguments_yes = models.TextField(" ", null=True, blank=True)
# official_arguments_no = models.TextField(" ", null=True, blank=True)
# official_arguments_source = models.CharField("", max_length=255, null=True, blank=True)
official_rebuttal_yes = models.TextField(" ", null=True, blank=True)
official_rebuttal_no = models.TextField(" ", null=True, blank=True)
measure_type = models.CharField("", max_length=255, null=True, blank=True)
passage_requirements = models.CharField("", max_length=255, null=True, blank=True)
fulltext_link = models.URLField("fulltext_link", max_length=1024, null=True, blank=True)
# full_text = models.TextField(" ", null=True, blank=True)
# simplified_title = models.CharField("", max_length=255, null=True, blank=True)
# way_it_is = models.TextField(" ", null=True, blank=True)
# what_if_pass = models.TextField(" ", null=True, blank=True)
# budget_effect = models.TextField(" ", null=True, blank=True)
# people_for_say = models.TextField(" ", null=True, blank=True)
# people_against_say = models.TextField(" ", null=True, blank=True)
# evg_source = models.CharField("", max_length=255, null=True, blank=True)
# lwv_question = models.TextField(" ", null=True, blank=True)
# lwv_situation = models.TextField(" ", null=True, blank=True)
# lwv_proposal = models.TextField(" ", null=True, blank=True)
# lwv_fiscal_effects = models.TextField(" ", null=True, blank=True)
# lwv_supporters_say = models.TextField(" ", null=True, blank=True)
# lwv_opponents_say = models.TextField(" ", null=True, blank=True)
# lwv_source = models.CharField("", max_length=255, null=True, blank=True)
# status = models.CharField("", max_length=255, null=True, blank=True)
# votes_for = models.CharField("", max_length=255, null=True, blank=True)
# votes_against = models.CharField("", max_length=255, null=True, blank=True)
# weight = models.CharField("", max_length=255, null=True, blank=True)
published = models.CharField("", max_length=255, null=True, blank=True)
disable_finance_data = models.CharField("", max_length=255, null=True, blank=True)
deleted = models.CharField("", max_length=255, null=True, blank=True)
entity_type = models.CharField("", max_length=255, null=True, blank=True)
measure_timestamp = models.DateTimeField("", null=True, blank=True)
created = models.DateTimeField("Date Created", auto_now_add=True)
modified = models.DateTimeField("Date Modified", auto_now=True)
def __unicode__(self):
return self.official_identifier
def get_absolute_url(self):
return ("measure-detail")
class MeasureContributor(models.Model):
measure = models.ForeignKey(Measure)
finance_top_id = models.IntegerField("", null=True, blank=True)
top_type = models.CharField("", max_length=255, null=True, blank=True)
support = models.CharField("", max_length=255, null=True, blank=True)
name = models.CharField("", max_length=255, null=True, blank=True)
total_amount = models.FloatField("", null=True, blank=True)
total_individual = models.FloatField("", null=True, blank=True)
total_organization = models.FloatField("", null=True, blank=True)
percentage_total = models.FloatField("", null=True, blank=True)
percentage_individual = models.FloatField("", null=True, blank=True)
percentage_organization = models.FloatField("", null=True, blank=True)
updated_date = models.DateField("", null=True, blank=True)
entity_type = models.IntegerField("", null=True, blank=True)
finance_top_timestamp = models.DateTimeField("", null=True, blank=True)
created = models.DateTimeField("Date Created", auto_now_add=True)
modified = models.DateTimeField("Date Modified", auto_now=True)
def __unicode__(self):
return self.name
class MeasureTotal(models.Model):
measure = models.ForeignKey(Measure)
finance_id = models.CharField("", max_length=255, null=True, blank=True)
support = models.CharField("", max_length=255, null=True, blank=True)
total_amount = models.FloatField("", null=True, blank=True)
total_individual = models.FloatField("", null=True, blank=True)
total_unitemized = models.FloatField("", null=True, blank=True)
total_itemized = models.FloatField("", null=True, blank=True)
total_organization = models.FloatField("", null=True, blank=True)
pct_individual = models.FloatField("", null=True, blank=True)
pct_organization = models.FloatField("", null=True, blank=True)
pct_unitemized = models.FloatField("", null=True, blank=True)
pct_itemized = models.FloatField("", null=True, blank=True)
updated_date = models.DateField("", null=True, blank=True)
entity_type = models.IntegerField("", null=True, blank=True)
finance_timestamp = models.DateTimeField("", null=True, blank=True)
created = models.DateTimeField("Date Created", auto_now_add=True)
modified = models.DateTimeField("Date Modified", auto_now=True)
def __unicode__(self):
return self.support
| mit | 9,157,139,782,778,764,000 | 59.909836 | 114 | 0.704885 | false | 3.609034 | false | false | false |
ttroy50/cmake-examples | 04-static-analysis/clang-format/cmake/scripts/clang-format-check-changed.py | 1 | 5095 | #!/usr/bin/env python
import argparse
import os
import sys
import subprocess
def check_file(filename, excludes, extensions):
"""
Check if a file should be included in our check
"""
name, ext = os.path.splitext(filename)
if len(ext) > 0 and ext in extensions:
if len(excludes) == 0:
return True
for exclude in excludes:
if exclude in filename:
return False
return True
return False
def check_directory(directory, excludes, extensions):
output = []
if len(excludes) > 0:
for exclude in excludes:
            if exclude in directory:
                # the whole directory is excluded from the check
                return output
for root, _, files in os.walk(directory):
for file in files:
filename = os.path.join(root, file)
if check_file(filename, excludes, extensions):
print("Will check file [{}]".format(filename))
output.append(filename)
return output
def get_git_root(git_bin):
cmd = [git_bin, "rev-parse", "--show-toplevel"]
try:
return subprocess.check_output(cmd).strip()
except subprocess.CalledProcessError, e:
print("Error calling git [{}]".format(e))
raise
def clean_git_filename(line):
"""
Takes a line from git status --porcelain and returns the filename
"""
file = None
git_status = line[:2]
# Not an exhaustive list of git status output but should
# be enough for this case
# check if this is a delete
if 'D' in git_status:
return None
# ignored file
if '!' in git_status:
return None
# Covers renamed files
if '->' in line:
file = line[3:].split('->')[-1].strip()
else:
file = line[3:].strip()
return file
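# Illustrative examples (added for clarity; the paths are made up):
#   clean_git_filename(" M src/main.cpp")              -> "src/main.cpp"
#   clean_git_filename("R  old_name.h -> new_name.h")  -> "new_name.h"
#   clean_git_filename(" D deleted_file.cpp")          -> None  (deletions are skipped)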
def get_changed_files(git_bin, excludes, file_extensions):
"""
Run git status and return the list of changed files
"""
extensions = file_extensions.split(",")
# arguments coming from cmake will be *.xx. We want to remove the *
for i, extension in enumerate(extensions):
if extension[0] == '*':
extensions[i] = extension[1:]
git_root = get_git_root(git_bin)
cmd = [git_bin, "status", "--porcelain", "--ignore-submodules"]
print("git cmd = {}".format(cmd))
output = []
returncode = 0
try:
cmd_output = subprocess.check_output(cmd)
for line in cmd_output.split('\n'):
if len(line) > 0:
file = clean_git_filename(line)
if not file:
continue
file = os.path.join(git_root, file)
if file[-1] == "/":
directory_files = check_directory(
file, excludes, file_extensions)
output = output + directory_files
else:
if check_file(file, excludes, file_extensions):
print("Will check file [{}]".format(file))
output.append(file)
except subprocess.CalledProcessError, e:
print("Error calling git [{}]".format(e))
returncode = e.returncode
return output, returncode
def run_clang_format(clang_format_bin, changed_files):
"""
Run clang format on a list of files
@return 0 if formatted correctly.
"""
if len(changed_files) == 0:
return 0
cmd = [clang_format_bin, "-style=file",
"-output-replacements-xml"] + changed_files
print("clang-format cmd = {}".format(cmd))
try:
cmd_output = subprocess.check_output(cmd)
if "replacement offset" in cmd_output:
print("ERROR: Changed files don't match format")
return 1
except subprocess.CalledProcessError, e:
print("Error calling clang-format [{}]".format(e))
return e.returncode
return 0
def cli():
# global params
parser = argparse.ArgumentParser(prog='clang-format-check-changed',
                                     description='Checks if files changed in git match the .clang-format specification')
parser.add_argument("--file-extensions", type=str,
default=".cpp,.h,.cxx,.hxx,.hpp,.cc,.ipp",
help="Comma separated list of file extensions to check")
parser.add_argument('--exclude', action='append', default=[],
help='Will not match the files / directories with these in the name')
parser.add_argument('--clang-format-bin', type=str, default="clang-format",
help="The clang format binary")
parser.add_argument('--git-bin', type=str, default="git",
help="The git binary")
args = parser.parse_args()
    # Get the list of files changed according to git
changed_files, returncode = get_changed_files(
args.git_bin, args.exclude, args.file_extensions)
if returncode != 0:
return returncode
return run_clang_format(args.clang_format_bin, changed_files)
if __name__ == '__main__':
sys.exit(cli())
| mit | -7,903,845,470,487,237,000 | 30.257669 | 120 | 0.575466 | false | 4.245833 | false | false | false |
datapythonista/pandas | pandas/tests/indexing/test_scalar.py | 4 | 9940 | """ test scalar indexing, including at and iat """
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
Timedelta,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.tests.indexing.common import Base
class TestScalar(Base):
@pytest.mark.parametrize("kind", ["series", "frame"])
def test_at_and_iat_get(self, kind):
def _check(f, func, values=False):
if f is not None:
indices = self.generate_indices(f, values)
for i in indices:
result = getattr(f, func)[i]
expected = self.get_value(func, f, i, values)
tm.assert_almost_equal(result, expected)
d = getattr(self, kind)
# iat
for f in [d["ints"], d["uints"]]:
_check(f, "iat", values=True)
for f in [d["labels"], d["ts"], d["floats"]]:
if f is not None:
msg = "iAt based indexing can only have integer indexers"
with pytest.raises(ValueError, match=msg):
self.check_values(f, "iat")
# at
for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
_check(f, "at")
@pytest.mark.parametrize("kind", ["series", "frame"])
def test_at_and_iat_set(self, kind):
def _check(f, func, values=False):
if f is not None:
indices = self.generate_indices(f, values)
for i in indices:
getattr(f, func)[i] = 1
expected = self.get_value(func, f, i, values)
tm.assert_almost_equal(expected, 1)
d = getattr(self, kind)
# iat
for f in [d["ints"], d["uints"]]:
_check(f, "iat", values=True)
for f in [d["labels"], d["ts"], d["floats"]]:
if f is not None:
msg = "iAt based indexing can only have integer indexers"
with pytest.raises(ValueError, match=msg):
_check(f, "iat")
# at
for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
_check(f, "at")
class TestScalar2:
    # TODO: Better name, just separating things that don't need Base class
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range("1/1/2000", periods=8)
df = DataFrame(np.random.randn(8, 4), index=dates, columns=["A", "B", "C", "D"])
s = df["A"]
result = s.at[dates[5]]
xp = s.values[5]
assert result == xp
# GH 7729
# make sure we are boxing the returns
s = Series(["2014-01-01", "2014-02-02"], dtype="datetime64[ns]")
expected = Timestamp("2014-02-02")
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
assert result == expected
s = Series(["1 days", "2 days"], dtype="timedelta64[ns]")
expected = Timedelta("2 days")
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
assert result == expected
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype="int64")
result = s.iloc[2]
assert result == 2
result = s.iat[2]
assert result == 2
msg = "index 10 is out of bounds for axis 0 with size 5"
with pytest.raises(IndexError, match=msg):
s.iat[10]
msg = "index -10 is out of bounds for axis 0 with size 5"
with pytest.raises(IndexError, match=msg):
s.iat[-10]
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype="int64")
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
assert result == 2
def test_frame_at_with_duplicate_axes(self):
# GH#33041
arr = np.random.randn(6).reshape(3, 2)
df = DataFrame(arr, columns=["A", "A"])
result = df.at[0, "A"]
expected = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.T.at["A", 0]
tm.assert_series_equal(result, expected)
# setter
df.at[1, "A"] = 2
expected = Series([2.0, 2.0], index=["A", "A"], name=1)
tm.assert_series_equal(df.iloc[1], expected)
def test_at_getitem_dt64tz_values(self):
# gh-15822
df = DataFrame(
{
"name": ["John", "Anderson"],
"date": [
Timestamp(2017, 3, 13, 13, 32, 56),
Timestamp(2017, 2, 16, 12, 10, 3),
],
}
)
df["date"] = df["date"].dt.tz_localize("Asia/Shanghai")
expected = Timestamp("2017-03-13 13:32:56+0800", tz="Asia/Shanghai")
result = df.loc[0, "date"]
assert result == expected
result = df.at[0, "date"]
assert result == expected
def test_mixed_index_at_iat_loc_iloc_series(self):
# GH 19860
s = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2])
for el, item in s.items():
assert s.at[el] == s.loc[el] == item
for i in range(len(s)):
assert s.iat[i] == s.iloc[i] == i + 1
with pytest.raises(KeyError, match="^4$"):
s.at[4]
with pytest.raises(KeyError, match="^4$"):
s.loc[4]
def test_mixed_index_at_iat_loc_iloc_dataframe(self):
# GH 19860
df = DataFrame(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], columns=["a", "b", "c", 1, 2]
)
for rowIdx, row in df.iterrows():
for el, item in row.items():
assert df.at[rowIdx, el] == df.loc[rowIdx, el] == item
for row in range(2):
for i in range(5):
assert df.iat[row, i] == df.iloc[row, i] == row * 5 + i
with pytest.raises(KeyError, match="^3$"):
df.at[0, 3]
with pytest.raises(KeyError, match="^3$"):
df.loc[0, 3]
def test_iat_setter_incompatible_assignment(self):
# GH 23236
result = DataFrame({"a": [0, 1], "b": [4, 5]})
result.iat[0, 0] = None
expected = DataFrame({"a": [None, 1], "b": [4, 5]})
tm.assert_frame_equal(result, expected)
def test_getitem_zerodim_np_array(self):
# GH24924
# dataframe __getitem__
df = DataFrame([[1, 2], [3, 4]])
result = df[np.array(0)]
expected = Series([1, 3], name=0)
tm.assert_series_equal(result, expected)
# series __getitem__
s = Series([1, 2])
result = s[np.array(0)]
assert result == 1
def test_iat_dont_wrap_object_datetimelike():
# GH#32809 .iat calls go through DataFrame._get_value, should not
# call maybe_box_datetimelike
dti = date_range("2016-01-01", periods=3)
tdi = dti - dti
ser = Series(dti.to_pydatetime(), dtype=object)
ser2 = Series(tdi.to_pytimedelta(), dtype=object)
df = DataFrame({"A": ser, "B": ser2})
assert (df.dtypes == object).all()
for result in [df.at[0, "A"], df.iat[0, 0], df.loc[0, "A"], df.iloc[0, 0]]:
assert result is ser[0]
assert isinstance(result, datetime)
assert not isinstance(result, Timestamp)
for result in [df.at[1, "B"], df.iat[1, 1], df.loc[1, "B"], df.iloc[1, 1]]:
assert result is ser2[1]
assert isinstance(result, timedelta)
assert not isinstance(result, Timedelta)
def test_at_with_tuple_index_get():
# GH 26989
# DataFrame.at getter works with Index of tuples
df = DataFrame({"a": [1, 2]}, index=[(1, 2), (3, 4)])
assert df.index.nlevels == 1
assert df.at[(1, 2), "a"] == 1
# Series.at getter works with Index of tuples
series = df["a"]
assert series.index.nlevels == 1
assert series.at[(1, 2)] == 1
def test_at_with_tuple_index_set():
# GH 26989
# DataFrame.at setter works with Index of tuples
df = DataFrame({"a": [1, 2]}, index=[(1, 2), (3, 4)])
assert df.index.nlevels == 1
df.at[(1, 2), "a"] = 2
assert df.at[(1, 2), "a"] == 2
# Series.at setter works with Index of tuples
series = df["a"]
assert series.index.nlevels == 1
series.at[1, 2] = 3
assert series.at[1, 2] == 3
class TestMultiIndexScalar:
def test_multiindex_at_get(self):
# GH 26989
# DataFrame.at and DataFrame.loc getter works with MultiIndex
df = DataFrame({"a": [1, 2]}, index=[[1, 2], [3, 4]])
assert df.index.nlevels == 2
assert df.at[(1, 3), "a"] == 1
assert df.loc[(1, 3), "a"] == 1
# Series.at and Series.loc getter works with MultiIndex
series = df["a"]
assert series.index.nlevels == 2
assert series.at[1, 3] == 1
assert series.loc[1, 3] == 1
def test_multiindex_at_set(self):
# GH 26989
# DataFrame.at and DataFrame.loc setter works with MultiIndex
df = DataFrame({"a": [1, 2]}, index=[[1, 2], [3, 4]])
assert df.index.nlevels == 2
df.at[(1, 3), "a"] = 3
assert df.at[(1, 3), "a"] == 3
df.loc[(1, 3), "a"] = 4
assert df.loc[(1, 3), "a"] == 4
# Series.at and Series.loc setter works with MultiIndex
series = df["a"]
assert series.index.nlevels == 2
series.at[1, 3] = 5
assert series.at[1, 3] == 5
series.loc[1, 3] = 6
assert series.loc[1, 3] == 6
def test_multiindex_at_get_one_level(self):
# GH#38053
s2 = Series((0, 1), index=[[False, True]])
result = s2.at[False]
assert result == 0
| bsd-3-clause | 5,832,607,505,893,796,000 | 30.356467 | 88 | 0.520523 | false | 3.328868 | true | false | false |
SahSih/ARStreaming360Display | RealTimeVideoStitch/motion_detector.py | 1 | 2815 | # USAGE
# python motion_detector.py
# python motion_detector.py --video videos/example_01.mp4
# import the necessary packages
import argparse
import datetime
import imutils
import time
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())
# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
camera = cv2.VideoCapture(0)
time.sleep(0.25)
# otherwise, we are reading from a video file
else:
	camera = cv2.VideoCapture(args["video"])
time.sleep(0.25)
# initialize the first frame in the video stream
firstFrame = None
# loop over the frames of the video
while True:
# grab the current frame and initialize the occupied/unoccupied
# text
(grabbed, frame) = camera.read()
text = "Unoccupied"
# if the frame could not be grabbed, then we have reached the end
# of the video
if not grabbed:
break
# resize the frame, convert it to grayscale, and blur it
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
# if the first frame is None, initialize it
if firstFrame is None:
firstFrame = gray
continue
# compute the absolute difference between the current frame and
# first frame
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
(cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# loop over the contours
for c in cnts:
# if the contour is too small, ignore it
if cv2.contourArea(c) < args["min_area"]:
continue
# compute the bounding box for the contour, draw it on the frame,
# and update the text
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Occupied"
# draw the text and timestamp on the frame
cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
(10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
# show the frame and record if the user presses a key
cv2.imshow("Security Feed", frame)
cv2.imshow("Thresh", thresh)
cv2.imshow("Frame Delta", frameDelta)
key = cv2.waitKey(1) & 0xFF
	# if the `q` key is pressed, break from the loop
if key == ord("q"):
break
# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
| mit | 7,161,149,992,135,203,000 | 28.946809 | 84 | 0.705151 | false | 2.896091 | false | false | false |
BronyTV/bronytv.net | btv_site/assets.py | 1 | 2355 | from flask_assets import Environment, Bundle
assets = Environment()
global_css = ["css/vendor/bootstrap.css", "css/global.css"]
global_js = ["js/vendor/jquery.js", "js/vendor/angular.js", "js/angular/common.js",
"js/header.js"]
def make_css(name, assets):
return Bundle(*assets, filters="cssmin", output="min/css/%s.css" % name)
def make_js(name, assets):
return Bundle(*assets, filters="jsmin", output="min/js/%s.js" % name)
def register_all(lst):
for asset_type, bundle_name, asset_files in lst:
if isinstance(asset_files, str):
asset_files = [asset_files]
if asset_type == "css":
assets.register(bundle_name, make_css(bundle_name, global_css + asset_files))
else:
assets.register(bundle_name, make_js(bundle_name, global_js + asset_files))
"""
Assets definitions look like this:
(asset_type, bundle_name, asset_files)
Where:
asset_type is one of "css" or "js"
bundle_name is the asset bundle name that will be used in templates
asset_files is a list of file names to add to the bundle, or a single filename str if there's only one
"""
register_all([
("css", "index_css", "css/index.css"),
("js", "index_js", ["js/vendor/moment.js", "js/vendor/moment-timezone-with-data-2010-2020.js",
"js/vendor/humanize-duration.js", "js/vendor/angular-timer.js", "js/angular/index.js"]),
("css", "stream_css", ["css/vendor/video-js.css", "css/stream.css", "css/vendor/animate.css"]),
("js", "stream_js", ["js/vendor/angular-animate.js", "js/vendor/video.js", "js/vendor/bootstrap-notify.js", "js/angular/stream.js"]),
("css", "chat_css", ["css/chat.css"]),
("js", "chat_js", []),
("css", "schedule_css", ["css/schedule.css"]),
("js", "schedule_js", []),
("css", "event_css", ["css/event.css"]),
("js", "event_js", ["js/vendor/moment.js", "js/angular/event.js"]),
("css", "about_css", []),
("js", "about_js", "js/angular/about.js"),
("css", "rules_css", []),
("js", "rules_js", "js/angular/rules.js"),
("css", "contact_css", []),
("js", "contact_js", []),
("css", "admin_index_css", "css/admin/index.css"),
("js", "admin_index_js", "js/angular/admin/index.js"),
("css", "admin_login_css", "css/admin/login.css"),
("js", "admin_login_js", [])
])
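# The bundle names registered above are what templates reference. A minimal usage
# sketch (illustrative -- assumes the standard Flask-Assets template tag and an
# index template, neither of which lives in this file):
#
#     {% assets "index_css" %}
#         <link rel="stylesheet" href="{{ ASSET_URL }}">
#     {% endassets %}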
| gpl-3.0 | -5,187,518,734,788,820,000 | 32.642857 | 137 | 0.6 | false | 3.078431 | false | false | false |
device42/nix_bsd_mac_inventory | module_hpux.py | 1 | 10053 | import paramiko
import math
import json
class GetHPUXData:
def __init__(self, ip, ssh_port, timeout, usr, pwd, use_key_file, key_file,
get_serial_info, get_hardware_info, get_os_details,
get_cpu_info, get_memory_info, ignore_domain, upload_ipv6, debug):
self.machine_name = ip
self.port = int(ssh_port)
self.timeout = timeout
self.username = usr
self.password = pwd
self.ssh = paramiko.SSHClient()
self.use_key_file = use_key_file
self.key_file = key_file
self.get_serial_info = get_serial_info
self.get_hardware_info = get_hardware_info
self.get_os_details = get_os_details
self.get_cpu_info = get_cpu_info
self.get_memory_info = get_memory_info
self.ignore_domain = ignore_domain
self.upload_ipv6 = upload_ipv6
self.debug = debug
self.ssh = paramiko.SSHClient()
self.conn = None
self.root = False
self.sysdata = {}
self.nic_data = {'nic_parts': {}}
self.ip_data = []
self.disk_data = {'hdd_parts':[]}
self.name = None
self.paths = {}
self.alldata = []
self.name = None
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
def main(self):
self.connect()
self.are_u_root()
self.get_sys_1()
self.get_sys_2()
self.get_macs()
self.get_ips()
self.get_cpu_num()
self.get_disks()
self.format_data()
return self.alldata
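    # Illustrative driver for this class (not part of the module; the host,
    # credentials and flags below are placeholders):
    #
    #     collector = GetHPUXData('10.0.0.5', 22, 30, 'root', 'secret', False, None,
    #                             True, True, True, True, True, False, False, False)
    #     inventory = collector.main()
    #
    # main() returns the list assembled in self.alldata (system, NIC, disk and IP records).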
def connect(self):
try:
if not self.use_key_file:
self.ssh.connect(str(self.machine_name), port=self.port,
username=self.username, password=self.password, timeout=self.timeout)
else:
self.ssh.connect(str(self.machine_name), port=self.port,
username=self.username, key_filename=self.key_file, timeout=self.timeout)
except paramiko.AuthenticationException:
print str(self.machine_name) + ': authentication failed'
return None
except Exception as err:
print str(self.machine_name) + ': ' + str(err)
return None
def execute(self, cmd, need_sudo=False):
if need_sudo and not self.root: # not working currently, maybe in the future
cmd_sudo = "sudo -S -p '' %s" % cmd
stdin, stdout, stderr = self.ssh.exec_command(cmd_sudo, timeout=30)
stdin.write('%s\n' % self.password)
stdin.flush()
else:
stdin, stdout, stderr = self.ssh.exec_command(cmd, timeout=30)
data_err = stderr.readlines()
data_out = stdout.readlines()
return data_out, data_err
def are_u_root(self):
cmd = 'id -u'
data, err = self.execute(cmd)
if data[0].strip() == '0':
self.root = True
else:
self.root = False
if not self.root:
print '[!] You must be root to run HP-UX discovery!'
return
def format_data(self):
self.alldata.append(self.sysdata)
self.alldata.append(self.nic_data)
self.alldata.append(self.disk_data)
    def get_sys_1(self):
        """Collect model, memory, serial number, UUID, hostname and OS release via `machinfo`."""
cmd = '/usr/contrib/bin/machinfo'
data_out, data_err = self.execute(cmd, False)
if not data_err:
raw = [x.strip().lower() for x in data_out if x not in ('', '\n', None)]
for rec in raw:
if rec.startswith('memory:'):
ram = int(math.ceil(float(rec.split()[1])))
self.sysdata.update({'memory':ram})
if rec.startswith('model:'):
model = rec.split(':')[1].strip().strip('"')
self.sysdata.update({'hardware': model})
if rec.startswith('machine id number:'):
uuid = rec.split(':')[1].strip()
self.sysdata.update({'uuid': uuid})
if rec.startswith('machine serial number'):
serial = rec.split(':')[1].strip()
self.sysdata.update({'serial_no': serial})
if rec.startswith('nodename:'):
name = rec.split(':')[1].strip()
self.sysdata.update({'name': name})
self.name = name
if rec.startswith('release:'):
os_version = rec.split(':')[1].strip()
osver = ' '.join(os_version.split()[1:]).strip()
self.sysdata.update({'os': 'hp-ux'})
self.sysdata.update({'osver': osver if osver else 'D42_NULL'})
else:
print '[!] Error in get_sys_1(). Message was: %s' % data_err
def get_sys_2(self):
cmd = '/opt/ignite/bin/print_manifest'
data_out, data_err = self.execute(cmd, False)
if not data_err:
raw = [x.strip().lower() for x in data_out if x not in ('', '\n', None)]
for rec in raw:
if rec.startswith('model:'):
model = rec.split(':')[1].strip()
self.sysdata.update({'hardware': model})
if rec.startswith('main memory:'):
m = rec.split(':')[1].split()[0]
ram = int(math.ceil(float(m.strip())))
self.sysdata.update({'memory': ram})
if 'speed:' in rec and 'mhz' in rec:
cpu_speed= rec.split(':')[1].strip('mhz').strip()
self.sysdata.update({'cpupower': cpu_speed})
if rec.startswith('hostname'):
name = rec.split(':')[1].strip()
self.name = name
self.sysdata.update({'name': name})
else:
print '[!] Error in get_sys_2(). Message was: %s' % data_err
    def get_macs(self):
        """Parse `lanscan` output and record a MAC address for every interface that is up."""
cmd = 'lanscan'
data_out, data_err = self.execute(cmd, False)
if not data_err:
raw = [x.strip().lower() for x in data_out if x not in ('', '\n', None)]
for rec in raw:
if rec.split()[3] == 'up':
words = rec.split()
nic_mac = words[1]
nic_name = words[4]
mac = ''.join(nic_mac.split('0x')[1:])
n=2
raw = [mac[i:i + n] for i in range(0, len(mac), n)]
macaddress = ':'.join(raw)
self.nic_data['nic_parts'].update({nic_name:{'serial_no':macaddress}})
else:
print '[!] Error in get_macs(). Message was: %s' % data_err
def get_ips(self):
ip_data = {}
mac_data = {}
for nic in self.nic_data['nic_parts']:
mac = self.nic_data['nic_parts'][nic]['serial_no']
ip_data.update({'device':self.name})
ip_data.update({'tag': nic})
mac_data.update({'device': self.name})
mac_data.update({'port_name': nic})
mac_data.update({'macaddress': mac})
ip_data.update({'macaddress': mac})
cmd = 'ifconfig %s | grep inet' % nic
data_out, data_err = self.execute(cmd, False)
if not data_err:
raw = [x.strip().lower() for x in data_out if x not in ('', '\n', None)]
for rec in raw:
ip = rec.split()[1].strip()
self.nic_data['nic_parts'][nic].update({'ipaddress':ip})
ip_data.update({'ipaddress': ip})
else:
print '[!] Error in get_ips(). Message was: %s' % data_err
self.alldata.append(ip_data)
self.alldata.append(mac_data)
def get_cpu_num(self):
cmd = 'ioscan -fnk|grep proc | wc -l'
data_out, data_err = self.execute(cmd, False)
if not data_err:
raw = [x.strip().lower() for x in data_out if x not in ('', '\n', None)]
if raw:
cpu_num = raw[0]
self.sysdata.update({'cpucount': cpu_num})
else:
print '[!] Error in get_cpu_num(). Message was: %s' % data_err
    def get_disks(self):
        """Walk /dev/rdisk and run `diskinfo` on each disk to collect its product id and size."""
cmd = 'ls /dev/rdisk/'
data_out, data_err = self.execute(cmd, False)
if not data_err:
disks = list(set([x.strip().split('_')[0] for x in data_out if x]))
for disk in disks:
cmd = 'diskinfo /dev/rdisk/%s' % disk
data_out, data_err = self.execute(cmd, False)
if not data_err:
raw = [x.strip().lower() for x in data_out if x not in ('', '\n', None)]
disk = {}
for rec in raw:
if 'describe of ' in rec: # another disk
if not len(disk) == 0:
self.disk_data['hdd_parts'].append(disk)
disk = {}
else:
if rec.startswith('product id'):
product = rec.split(':')[1].strip()
disk.update({'product': product})
if rec.startswith('size'):
size = int(math.ceil(float(rec.split(':')[1].split()[0].strip()) / 1024 / 1024))
disk.update({'hdd_size': size})
disk.update({'assignment': 'device'})
if self.name:
disk.update({'device': self.name})
self.disk_data['hdd_parts'].append(disk)
else:
                    print '[!] Error in get_disks(). Message was: %s' % data_err
| mit | 6,703,414,946,404,267,000 | 39.721992 | 112 | 0.468915 | false | 4.030874 | false | false | false
rmcgibbo/openmoltools | openmoltools/amber.py | 1 | 17246 | import mdtraj as md
import tempfile
import logging
import os
import shutil
from distutils.spawn import find_executable
from mdtraj.utils.delay_import import import_
import mdtraj.utils
try:
from subprocess import getoutput # If python 3
except ImportError:
from commands import getoutput # If python 2
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format="LOG: %(message)s")
# http://ambermd.org/tutorials/advanced/tutorial15/Tutorial2.xhtml
# Run tLEaP with input file:
# $ tleap -f commands.in
TLEAP_TEMPLATE = """
source leaprc.gaff
source oldff/leaprc.ff99SB
%(mol2_section)s
box = loadPdb %(box_filename)s
%(amberparams_section)s
setbox box centers
saveAmberParm box %(prmtop_filename)s %(inpcrd_filename)s
quit
"""
#loadmol2_section will look something like this:
#BMI = loadmol2 bmi.mol2
#BF4 = loadmol2 bf4.mol2
#ACN = loadmol2 acn.mol2
#loadamberparams_section looks like this:
#loadamberparams frcmod.bf4
#loadamberparams frcmod.bmi
#loadamberparams frcmod.acn
def build_mixture_prmtop(mol2_filenames, frcmod_filenames, box_filename, prmtop_filename, inpcrd_filename, water_model = 'TIP3P'):
"""Create a prmtop and inpcrd from a collection of mol2 and frcmod files
as well as a single box PDB. We have used this for setting up
simulations of neat liquids or binary mixtures.
Parameters
----------
mol2_filenames : list(str)
Filenames of GAFF flavored mol2 files. Each must contain exactly
ONE ligand.
    frcmod_filenames : list(str)
        Filenames of the input GAFF frcmod files, one per mol2 file.
box_filename : str
Filename of PDB containing an arbitrary box of the mol2 molecules.
prmtop_filename : str
output prmtop filename. Should have suffix .prmtop
inpcrd_filename : str
output inpcrd filename. Should have suffix .inpcrd
water_model : str, optional. Default: "TIP3P"
String specifying water model to be used IF water is present as a component of the mixture. Valid options are currently "TIP3P", "SPC", or None. If None is specified, flexible GAFF-water will be used as for any other solute (old behavior).
Returns
-------
tleap_commands : str
The string of commands piped to tleap for building the prmtop
and inpcrd files. This will *already* have been run, but the
output can be useful for debugging or archival purposes. However,
this will reflect temporary file names for both input and output
file as these are used to avoid tleap filename restrictions.
Notes
-----
This can be easily broken if there are missing, duplicated, or
inconsistent ligand residue names in your box, mol2, and frcmod files.
You can use mdtraj to edit the residue names with something like
this: trj.top.residue(0).name = "L1"
"""
# Check for one residue name per mol2 file and uniqueness between all mol2 files
all_names = set()
for filename in mol2_filenames:
t = md.load(filename)
names = set([r.name for r in t.top.residues])
if len(names) != 1:
raise(ValueError("Must have a SINGLE residue name in each mol2 file."))
all_names = all_names.union(list(names))
if len(all_names) != len(mol2_filenames):
raise(ValueError("Must have UNIQUE residue names in each mol2 file."))
if len(mol2_filenames) != len(frcmod_filenames):
raise(ValueError("Must provide an equal number of frcmod and mol2 file names."))
#Get number of files
nfiles = len(mol2_filenames)
#Check validity of water model options
valid_water = ['TIP3P', 'SPC', None]
if not water_model in valid_water:
raise(ValueError("Must provide a valid water model."))
#If we are requesting a different water model, check if there is water present
if not water_model==None:
parmed = import_("parmed")
solventIsWater = []
waterPresent = False
for i in range(nfiles):
mol = parmed.load_file( mol2_filenames[i] )
#Check if it is water by checking GAFF atom names
types = [ atom.type for atom in mol.atoms ]
if 'oh' in types and types.count('ho')==2 and len(types)==3:
solventIsWater.append(True)
waterPresent = True
else:
solventIsWater.append(False)
#In this case, if we have any water, we will now work on fewer .mol2 and .frcmod files and instead use the force field files for those. So, reduce nfiles and remove the files we don't need from the .mol2 and .frcmod filename lists
#After doing so, go on to interpret the specified water model and compose the water model string needed for tleap
if waterPresent:
new_mol2_filenames = []
new_frcmod_filenames = []
water_mol2_filenames = []
for i in range( nfiles ):
if not solventIsWater[i]:
new_mol2_filenames.append( mol2_filenames[i] )
new_frcmod_filenames.append( frcmod_filenames[i] )
else:
water_mol2_filenames.append( mol2_filenames[i] )
nfiles = len(new_mol2_filenames)
mol2_filenames = new_mol2_filenames
frcmod_filenames = new_frcmod_filenames
#Now interpret the specified water model and translate into AMBER nomenclature
if water_model=='TIP3P':
water_model = 'TP3'
elif water_model =='SPC':
water_model = 'SPC'
else:
raise(ValueError("Cannot translate specified water model into one of the available models."))
#Compose string for loading specified water molecule
water_string = '\n'
water_names = [md.load(filename).top.residue(0).name for filename in water_mol2_filenames]
for name in water_names:
water_string += '%s = %s\n' % (name, water_model )
#Also if not TIP3P, update to source correct frcmod file
if water_model == 'SPC':
water_string += 'loadamberparams frcmod.spce\n'
elif water_model =='TP3':
continue
else:
raise(ValueError("Cannot identify water frcmod file to be loaded."))
#Rename water atoms in box file to match what is expected by AMBER
packmol = import_("openmoltools.packmol")
packmol.rename_water_atoms(box_filename)
else:
waterPresent = False
#Make temporary, hardcoded filenames for mol2 and frcmod input to avoid tleap filename restrictions
tmp_mol2_filenames = [ 'in%d.mol2' % n for n in range(nfiles) ]
tmp_frcmod_filenames = [ 'in%d.frcmod' % n for n in range(nfiles) ]
#Make temporary, hardcoded filenames for output files to avoid tleap filename restrictions
tmp_prmtop_filename = 'out.prmtop'
tmp_inpcrd_filename = 'out.inpcrd'
tmp_box_filename = 'tbox.pdb'
#Build absolute paths of input files so we can use context and temporary directory
infiles = mol2_filenames + frcmod_filenames + [box_filename]
infiles = [ os.path.abspath(filenm) for filenm in infiles ]
#Build absolute paths of output files so we can copy them back
prmtop_filename = os.path.abspath( prmtop_filename )
inpcrd_filename = os.path.abspath( inpcrd_filename )
#Use temporary directory and do the setup
with mdtraj.utils.enter_temp_directory():
#Copy input files to temporary file names in target directory
for (infile, outfile) in zip( infiles, tmp_mol2_filenames+tmp_frcmod_filenames+[tmp_box_filename] ):
shutil.copy( infile, outfile)
logger.debug('Copying input file %s to %s...\n' % (infile, outfile))
all_names = [md.load(filename).top.residue(0).name for filename in tmp_mol2_filenames]
mol2_section = "\n".join("%s = loadmol2 %s" % (all_names[k], filename) for k, filename in enumerate(tmp_mol2_filenames))
#If non-GAFF water is present, load desired parameters for that water as well.
if waterPresent:
mol2_section += water_string
amberparams_section = "\n".join("loadamberparams %s" % (filename) for k, filename in enumerate(tmp_frcmod_filenames))
tleap_commands = TLEAP_TEMPLATE % dict(mol2_section=mol2_section, amberparams_section=amberparams_section, box_filename=tmp_box_filename, prmtop_filename=tmp_prmtop_filename, inpcrd_filename=tmp_inpcrd_filename)
print(tleap_commands)
file_handle = tempfile.NamedTemporaryFile('w') # FYI Py3K defaults to 'wb' mode, which won't work here.
file_handle.writelines(tleap_commands)
file_handle.flush()
logger.debug('Running tleap in temporary directory.')
cmd = "tleap -f %s " % file_handle.name
logger.debug(cmd)
output = getoutput(cmd)
logger.debug(output)
check_for_errors( output, other_errors = ['Improper number of arguments'], ignore_errors = ['unperturbed charge of the unit', 'ignoring the error'] )
file_handle.close()
#Copy stuff back to right filenames
for (tfile, finalfile) in zip( [tmp_prmtop_filename, tmp_inpcrd_filename], [prmtop_filename, inpcrd_filename] ):
shutil.copy( tfile, finalfile)
return tleap_commands
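# Illustrative call (the mol2/frcmod/pdb names below are placeholders, not files
# shipped with openmoltools); this writes mixture.prmtop and mixture.inpcrd:
#
#     commands = build_mixture_prmtop(['ethanol.gaff.mol2', 'water.mol2'],
#                                     ['ethanol.frcmod', 'water.frcmod'],
#                                     'box.pdb', 'mixture.prmtop', 'mixture.inpcrd',
#                                     water_model='TIP3P')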
def check_for_errors( outputtext, other_errors = None, ignore_errors = None ):
"""Check AMBER package output for the string 'ERROR' (upper or lowercase) and (optionally) specified other strings and raise an exception if it is found (to avoid silent failures which might be noted to log but otherwise ignored).
Parameters
----------
outputtext : str
String listing output text from an (AMBER) command which should be checked for errors.
other_errors : list(str), default None
        If specified, provide strings for other errors which will be checked for, such as "improper number of arguments", etc.
ignore_errors: list(str), default None
If specified, AMBER output lines containing errors but also containing any of the specified strings will be ignored (because, for example, AMBER issues an "ERROR" for non-integer charges in some cases when only a warning is needed).
Notes
-----
    If error(s) are found, raise a RuntimeError and attempt to print the appropriate errors from the processed text."""
lines = outputtext.split('\n')
error_lines = []
for line in lines:
if 'ERROR' in line.upper():
error_lines.append( line )
if not other_errors == None:
for err in other_errors:
if err.upper() in line.upper():
error_lines.append( line )
if not ignore_errors == None and len(error_lines)>0:
new_error_lines = []
for ign in ignore_errors:
ignore = False
for err in error_lines:
if ign in err:
ignore = True
if not ignore:
new_error_lines.append( err )
error_lines = new_error_lines
if len(error_lines) > 0:
print("Unexpected errors encountered running AMBER tool. Offending output:")
for line in error_lines: print(line)
raise(RuntimeError("Error encountered running AMBER tool. Exiting."))
return
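# Example of the ignore mechanism used elsewhere in this module (illustrative):
# AMBER flags non-integral unit charges with "ERROR" even when only a warning is
# warranted, so such lines are filtered out before deciding whether to raise.
#
#     check_for_errors(output, other_errors=['Improper number of arguments'],
#                      ignore_errors=['unperturbed charge of the unit'])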
def find_gaff_dat():
AMBERHOME = None
try:
AMBERHOME = os.environ['AMBERHOME']
except KeyError:
pass
if AMBERHOME is None:
full_path = find_executable("parmchk2")
try:
AMBERHOME = os.path.split(full_path)[0]
AMBERHOME = os.path.join(AMBERHOME, "../")
except:
raise(ValueError("Cannot find AMBER GAFF"))
if AMBERHOME is None:
raise(ValueError("Cannot find AMBER GAFF"))
return os.path.join(AMBERHOME, 'dat', 'leap', 'parm', 'gaff.dat')
GAFF_DAT_FILENAME = find_gaff_dat()
def run_antechamber(molecule_name, input_filename, charge_method="bcc", net_charge=None, gaff_mol2_filename=None, frcmod_filename=None):
"""Run AmberTools antechamber and parmchk2 to create GAFF mol2 and frcmod files.
Parameters
----------
molecule_name : str
Name of the molecule to be parameterized, will be used in output filenames.
    input_filename : str
        The molecule to be parameterized. Must be Tripos mol2 format.
charge_method : str, optional
If not None, the charge method string will be passed to Antechamber.
net_charge : int, optional
If not None, net charge of the molecule to be parameterized.
If None, Antechamber sums up partial charges from the input file.
gaff_mol2_filename : str, optional, default=None
Name of GAFF mol2 filename to output. If None, uses local directory
and molecule_name
frcmod_filename : str, optional, default=None
Name of GAFF frcmod filename to output. If None, uses local directory
and molecule_name
Returns
-------
gaff_mol2_filename : str
GAFF format mol2 filename produced by antechamber
frcmod_filename : str
        Amber frcmod file produced by parmchk2
"""
utils = import_("openmoltools.utils")
ext = utils.parse_ligand_filename(input_filename)[1]
filetype = ext[1:]
if filetype != "mol2":
raise(ValueError("Must input mol2 filename"))
if gaff_mol2_filename is None:
gaff_mol2_filename = molecule_name + '.gaff.mol2'
if frcmod_filename is None:
frcmod_filename = molecule_name + '.frcmod'
#Build absolute paths for input and output files
gaff_mol2_filename = os.path.abspath( gaff_mol2_filename )
frcmod_filename = os.path.abspath( frcmod_filename )
input_filename = os.path.abspath( input_filename )
#Use temporary directory context to do this to avoid issues with spaces in filenames, etc.
with mdtraj.utils.enter_temp_directory():
shutil.copy( input_filename, 'in.mol2' )
cmd = "antechamber -i in.mol2 -fi mol2 -o out.mol2 -fo mol2 -s 2"
if charge_method is not None:
cmd += ' -c %s' % charge_method
if net_charge is not None:
cmd += ' -nc %d' % net_charge
logger.debug(cmd)
output = getoutput(cmd)
logger.debug(output)
cmd = "parmchk2 -i out.mol2 -f mol2 -o out.frcmod"
logger.debug(cmd)
output = getoutput(cmd)
logger.debug(output)
check_for_errors( output )
#Copy back
shutil.copy( 'out.mol2', gaff_mol2_filename )
shutil.copy( 'out.frcmod', frcmod_filename )
return gaff_mol2_filename, frcmod_filename
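# Illustrative call ('ligand.mol2' is a placeholder Tripos mol2 file):
#
#     gaff_mol2, frcmod = run_antechamber('ligand', 'ligand.mol2',
#                                         charge_method='bcc', net_charge=0)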
def run_tleap(molecule_name, gaff_mol2_filename, frcmod_filename, prmtop_filename=None, inpcrd_filename=None):
"""Run AmberTools tleap to create simulation files for AMBER
Parameters
----------
molecule_name : str
The name of the molecule
gaff_mol2_filename : str
GAFF format mol2 filename produced by antechamber
frcmod_filename : str
        Amber frcmod file produced by parmchk2
prmtop_filename : str, optional, default=None
Amber prmtop file produced by tleap, defaults to molecule_name
inpcrd_filename : str, optional, default=None
Amber inpcrd file produced by tleap, defaults to molecule_name
Returns
-------
prmtop_filename : str
Amber prmtop file produced by tleap
inpcrd_filename : str
Amber inpcrd file produced by tleap
"""
if prmtop_filename is None:
prmtop_filename = "%s.prmtop" % molecule_name
if inpcrd_filename is None:
inpcrd_filename = "%s.inpcrd" % molecule_name
#Get absolute paths for input/output
gaff_mol2_filename = os.path.abspath( gaff_mol2_filename )
frcmod_filename = os.path.abspath( frcmod_filename )
prmtop_filename = os.path.abspath( prmtop_filename )
inpcrd_filename = os.path.abspath( inpcrd_filename )
#Work in a temporary directory, on hard coded filenames, to avoid any issues AMBER may have with spaces and other special characters in filenames
with mdtraj.utils.enter_temp_directory():
shutil.copy( gaff_mol2_filename, 'file.mol2' )
shutil.copy( frcmod_filename, 'file.frcmod' )
tleap_input = """
source oldff/leaprc.ff99SB
source leaprc.gaff
LIG = loadmol2 file.mol2
check LIG
loadamberparams file.frcmod
saveamberparm LIG out.prmtop out.inpcrd
quit
"""
file_handle = tempfile.NamedTemporaryFile('w') # FYI Py3K defaults to 'wb' mode, which won't work here.
file_handle.writelines(tleap_input)
file_handle.flush()
cmd = "tleap -f %s " % file_handle.name
logger.debug(cmd)
output = getoutput(cmd)
logger.debug(output)
check_for_errors( output, other_errors = ['Improper number of arguments'] )
file_handle.close()
#Copy back target files
shutil.copy( 'out.prmtop', prmtop_filename )
shutil.copy( 'out.inpcrd', inpcrd_filename )
return prmtop_filename, inpcrd_filename
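# Typical chaining of the two helpers (illustrative; filenames are placeholders):
#
#     gaff_mol2, frcmod = run_antechamber('ligand', 'ligand.mol2')
#     prmtop, inpcrd = run_tleap('ligand', gaff_mol2, frcmod)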
| gpl-2.0 | 6,290,256,106,830,964,000 | 38.555046 | 247 | 0.650586 | false | 3.757298 | false | false | false |
nimbusproject/dashi | dashi/bootstrap/containers.py | 1 | 5837 | #!/usr/bin/env python
__author__ = 'Adam R. Smith'
__license__ = 'Apache 2.0'
import collections
class DotNotationGetItem(object):
""" Drive the behavior for DotList and DotDict lookups by dot notation, JSON-style. """
def _convert(self, val):
""" Convert the type if necessary and return if a conversion happened. """
if isinstance(val, dict) and not isinstance(val, DotDict):
return DotDict(val), True
elif isinstance(val, list) and not isinstance(val, DotList):
return DotList(val), True
return val, False
def __getitem__(self, key):
val = super(DotNotationGetItem, self).__getitem__(key)
val, converted = self._convert(val)
if converted: self[key] = val
return val
class DotList(DotNotationGetItem, list):
""" Partner class for DotDict; see that for docs. Both are needed to fully support JSON/YAML blocks. """
#def DotListIterator(list.)
def __iter__(self):
""" Monkey-patch the "next" iterator method to return modified versions. This will be slow. """
#it = super(DotList, self).__iter__()
#it_next = getattr(it, 'next')
#setattr(it, 'next', lambda: it_next(it))
#return it
for val in super(DotList, self).__iter__():
val, converted = self._convert(val)
yield val
class DotDict(DotNotationGetItem, dict):
"""
Subclass of dict that will recursively look up attributes with dot notation.
This is primarily for working with JSON-style data in a cleaner way like javascript.
Note that this will instantiate a number of child DotDicts when you first access attributes;
do not use in performance-critical parts of your code.
"""
def __getattr__(self, key):
""" Make attempts to lookup by nonexistent attributes also attempt key lookups. """
try:
val = self.__getitem__(key)
except KeyError:
raise AttributeError(key)
return val
def copy(self):
return DotDict(dict.copy(self))
@classmethod
def fromkeys(cls, seq, value=None):
return DotDict(dict.fromkeys(seq, value))
class DictModifier(DotDict):
"""
Subclass of DotDict that allows the sparse overriding of dict values.
"""
def __init__(self, base, data=None):
# base should be a DotDict, raise TypeError exception if not
if not isinstance(base, DotDict):
raise TypeError("Base must be of type DotDict")
self.base = base
if data is not None:
self.update(data)
def __getattr__(self, key):
try:
return DotDict.__getattr__(self, key)
except AttributeError, ae:
# Delegate to base
return getattr(self.base, key)
def __getitem__(self, key):
try:
return DotDict.__getitem__(self, key)
except KeyError, ke:
# Delegate to base
return getattr(self.base, key)
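# Illustrative use of DictModifier (the keys below are made up): the modifier keeps
# only the sparse overrides and falls back to the wrapped DotDict for everything else.
#
#     base = DotDict({'server': {'host': 'localhost', 'port': 5672}})
#     conf = DictModifier(base, {'port': 5673})
#     conf.port         # 5673, from the override
#     conf.server.host  # 'localhost', delegated to base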
# dict_merge from: http://appdelegateinc.com/blog/2011/01/12/merge-deeply-nested-dicts-in-python/
def quacks_like_dict(object):
"""Check if object is dict-like"""
return isinstance(object, collections.Mapping)
def dict_merge(a, b):
"""Merge two deep dicts non-destructively
Uses a stack to avoid maximum recursion depth exceptions
>>> a = {'a': 1, 'b': {1: 1, 2: 2}, 'd': 6}
>>> b = {'c': 3, 'b': {2: 7}, 'd': {'z': [1, 2, 3]}}
    >>> c = dict_merge(a, b)
>>> from pprint import pprint; pprint(c)
{'a': 1, 'b': {1: 1, 2: 7}, 'c': 3, 'd': {'z': [1, 2, 3]}}
"""
    assert quacks_like_dict(a) and quacks_like_dict(b)
dst = a.copy()
stack = [(dst, b)]
while stack:
current_dst, current_src = stack.pop()
for key in current_src:
if key not in current_dst:
current_dst[key] = current_src[key]
else:
if quacks_like_dict(current_src[key]) and quacks_like_dict(current_dst[key]) :
stack.append((current_dst[key], current_src[key]))
else:
current_dst[key] = current_src[key]
return dst
def named_any(name):
"""
Retrieve a Python object by its fully qualified name from the global Python
module namespace. The first part of the name, that describes a module,
will be discovered and imported. Each subsequent part of the name is
treated as the name of an attribute of the object specified by all of the
name which came before it.
@param name: The name of the object to return.
@return: the Python object identified by 'name'.
"""
assert name, 'Empty module name'
names = name.split('.')
topLevelPackage = None
moduleNames = names[:]
while not topLevelPackage:
if moduleNames:
trialname = '.'.join(moduleNames)
try:
topLevelPackage = __import__(trialname)
except Exception, ex:
moduleNames.pop()
else:
if len(names) == 1:
raise Exception("No module named %r" % (name,))
else:
raise Exception('%r does not name an object' % (name,))
obj = topLevelPackage
for n in names[1:]:
obj = getattr(obj, n)
return obj
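# Illustrative lookups (standard-library names, not defined in this module):
#
#     named_any('os.path.join')         # the join function from os.path
#     named_any('collections.Mapping')  # the Mapping ABC used by quacks_like_dict above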
def for_name(modpath, classname):
'''
    Returns an instance of the class "classname" from module "modpath".
'''
module = __import__(modpath, fromlist=[classname])
classobj = getattr(module, classname)
return classobj()
if __name__ == '__main__':
dd = DotDict({'a':{'b':{'c':1, 'd':2}}})
print dd.a.b.c, dd.a.b.d
print dd.a.b
#print dd.foo
print dict.fromkeys(('a','b','c'), 'foo')
print DotDict.fromkeys(('a','b','c'), 'foo').a
dl = DotList([1, {'a':{'b':{'c':1, 'd':2}}}])
print dl[1].a.b.c
| apache-2.0 | 2,652,685,822,307,659,300 | 31.433333 | 108 | 0.588145 | false | 3.832567 | false | false | false |
nuagenetworks/vspk-python | vspk/v6/nusubnet.py | 1 | 58844 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPATIPEntriesFetcher
from .fetchers import NUTCAsFetcher
from .fetchers import NUAddressRangesFetcher
from .fetchers import NUDefaultGatewaysFetcher
from .fetchers import NUDeploymentFailuresFetcher
from .fetchers import NUPermissionsFetcher
from .fetchers import NUVMResyncsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUBGPNeighborsFetcher
from .fetchers import NUDHCPOptionsFetcher
from .fetchers import NUDHCPv6OptionsFetcher
from .fetchers import NUVirtualIPsFetcher
from .fetchers import NUIKEGatewayConnectionsFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUVMsFetcher
from .fetchers import NUVMInterfacesFetcher
from .fetchers import NUVMIPReservationsFetcher
from .fetchers import NUEnterprisePermissionsFetcher
from .fetchers import NUContainersFetcher
from .fetchers import NUContainerInterfacesFetcher
from .fetchers import NUContainerResyncsFetcher
from .fetchers import NUQOSsFetcher
from .fetchers import NUVPortsFetcher
from .fetchers import NUIPReservationsFetcher
from .fetchers import NUProxyARPFiltersFetcher
from .fetchers import NUStatisticsFetcher
from .fetchers import NUStatisticsPoliciesFetcher
from .fetchers import NUEventLogsFetcher
from bambou import NURESTObject
class NUSubnet(NURESTObject):
""" Represents a Subnet in the VSD
Notes:
This is the definition of a subnet associated with a Zone.
"""
__rest_name__ = "subnet"
__resource_name__ = "subnets"
## Constants
CONST_PAT_ENABLED_DISABLED = "DISABLED"
CONST_PAT_ENABLED_INHERITED = "INHERITED"
CONST_USE_GLOBAL_MAC_DISABLED = "DISABLED"
CONST_RESOURCE_TYPE_FLOATING = "FLOATING"
CONST_RESOURCE_TYPE_NSG_VNF = "NSG_VNF"
CONST_DPI_ENABLED = "ENABLED"
CONST_DHCP_RELAY_STATUS_DISABLED = "DISABLED"
CONST_IP_TYPE_IPV6 = "IPV6"
CONST_DPI_INHERITED = "INHERITED"
CONST_IP_TYPE_IPV4 = "IPV4"
CONST_UNDERLAY_ENABLED_ENABLED = "ENABLED"
CONST_MAINTENANCE_MODE_DISABLED = "DISABLED"
CONST_RESOURCE_TYPE_STANDARD = "STANDARD"
CONST_USE_GLOBAL_MAC_ENABLED = "ENABLED"
CONST_MAINTENANCE_MODE_ENABLED = "ENABLED"
CONST_L2_ENCAP_TYPE_VLAN = "VLAN"
CONST_RESOURCE_TYPE_PUBLIC = "PUBLIC"
CONST_UNDERLAY_ENABLED_INHERITED = "INHERITED"
CONST_L2_ENCAP_TYPE_VXLAN = "VXLAN"
CONST_USE_GLOBAL_MAC_ENTERPRISE_DEFAULT = "ENTERPRISE_DEFAULT"
CONST_ENCRYPTION_INHERITED = "INHERITED"
CONST_ENTITY_STATE_UNDER_CONSTRUCTION = "UNDER_CONSTRUCTION"
CONST_PAT_ENABLED_ENABLED = "ENABLED"
CONST_MULTICAST_ENABLED = "ENABLED"
CONST_MULTICAST_INHERITED = "INHERITED"
CONST_L2_ENCAP_TYPE_MPLSOUDP = "MPLSoUDP"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_DHCP_RELAY_STATUS_ENABLED = "ENABLED"
CONST_MULTICAST_DISABLED = "DISABLED"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_L2_ENCAP_TYPE_MPLS = "MPLS"
CONST_ENCRYPTION_DISABLED = "DISABLED"
CONST_DPI_DISABLED = "DISABLED"
CONST_MAINTENANCE_MODE_ENABLED_INHERITED = "ENABLED_INHERITED"
CONST_ENCRYPTION_ENABLED = "ENABLED"
CONST_IP_TYPE_DUALSTACK = "DUALSTACK"
CONST_ENTITY_STATE_MARKED_FOR_DELETION = "MARKED_FOR_DELETION"
CONST_UNDERLAY_ENABLED_DISABLED = "DISABLED"
def __init__(self, **kwargs):
""" Initializes a Subnet instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> subnet = NUSubnet(id=u'xxxx-xxx-xxx-xxx', name=u'Subnet')
>>> subnet = NUSubnet(data=my_dict)
"""
super(NUSubnet, self).__init__()
# Read/Write Attributes
self._l2_encap_type = None
self._pat_enabled = None
self._dhcp_relay_status = None
self._dpi = None
self._ip_type = None
self._ipv6_address = None
self._ipv6_gateway = None
self._evpn_enabled = None
self._maintenance_mode = None
self._name = None
self._last_updated_by = None
self._last_updated_date = None
self._gateway = None
self._gateway_mac_address = None
self._access_restriction_enabled = None
self._address = None
self._advertise = None
self._template_id = None
self._service_id = None
self._description = None
self._resource_type = None
self._netmask = None
self._link_local_address = None
self._embedded_metadata = None
self._vn_id = None
self._enable_dhcpv4 = None
self._enable_dhcpv6 = None
self._encryption = None
self._underlay = None
self._underlay_enabled = None
self._ingress_replication_enabled = None
self._interface_id = None
self._entity_scope = None
self._entity_state = None
self._policy_group_id = None
self._color = None
self._domain_service_label = None
self._route_distinguisher = None
self._route_target = None
self._split_subnet = None
self._irb_sub_interface_id = None
self._creation_date = None
self._proxy_arp = None
self._vrrp_ipv6_backup_address = None
self._use_global_mac = None
self._associated_multicast_channel_map_id = None
self._associated_shared_network_resource_id = None
self._dual_stack_dynamic_ip_allocation = None
self._public = None
self._subnet_vlanid = None
self._multi_home_enabled = None
self._multicast = None
self._customer_id = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="l2_encap_type", remote_name="l2EncapType", attribute_type=str, is_required=False, is_unique=False, choices=[u'MPLS', u'MPLSoUDP', u'VLAN', u'VXLAN'])
self.expose_attribute(local_name="pat_enabled", remote_name="PATEnabled", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
self.expose_attribute(local_name="dhcp_relay_status", remote_name="DHCPRelayStatus", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED'])
self.expose_attribute(local_name="dpi", remote_name="DPI", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
self.expose_attribute(local_name="ip_type", remote_name="IPType", attribute_type=str, is_required=False, is_unique=False, choices=[u'DUALSTACK', u'IPV4', u'IPV6'])
self.expose_attribute(local_name="ipv6_address", remote_name="IPv6Address", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="ipv6_gateway", remote_name="IPv6Gateway", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="evpn_enabled", remote_name="EVPNEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="maintenance_mode", remote_name="maintenanceMode", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'ENABLED_INHERITED'])
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway", remote_name="gateway", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_mac_address", remote_name="gatewayMACAddress", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="access_restriction_enabled", remote_name="accessRestrictionEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="address", remote_name="address", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="advertise", remote_name="advertise", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="template_id", remote_name="templateID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="service_id", remote_name="serviceID", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="resource_type", remote_name="resourceType", attribute_type=str, is_required=False, is_unique=False, choices=[u'FLOATING', u'NSG_VNF', u'PUBLIC', u'STANDARD'])
self.expose_attribute(local_name="netmask", remote_name="netmask", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="link_local_address", remote_name="linkLocalAddress", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="vn_id", remote_name="vnId", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="enable_dhcpv4", remote_name="enableDHCPv4", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="enable_dhcpv6", remote_name="enableDHCPv6", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="encryption", remote_name="encryption", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
self.expose_attribute(local_name="underlay", remote_name="underlay", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="underlay_enabled", remote_name="underlayEnabled", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
self.expose_attribute(local_name="ingress_replication_enabled", remote_name="ingressReplicationEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="interface_id", remote_name="interfaceID", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="entity_state", remote_name="entityState", attribute_type=str, is_required=False, is_unique=False, choices=[u'MARKED_FOR_DELETION', u'UNDER_CONSTRUCTION'])
self.expose_attribute(local_name="policy_group_id", remote_name="policyGroupID", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="color", remote_name="color", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="domain_service_label", remote_name="domainServiceLabel", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="route_distinguisher", remote_name="routeDistinguisher", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="route_target", remote_name="routeTarget", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="split_subnet", remote_name="splitSubnet", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="irb_sub_interface_id", remote_name="irbSubInterfaceID", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="proxy_arp", remote_name="proxyARP", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="vrrp_ipv6_backup_address", remote_name="vrrpIPv6BackupAddress", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="use_global_mac", remote_name="useGlobalMAC", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'ENTERPRISE_DEFAULT'])
self.expose_attribute(local_name="associated_multicast_channel_map_id", remote_name="associatedMulticastChannelMapID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_shared_network_resource_id", remote_name="associatedSharedNetworkResourceID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="dual_stack_dynamic_ip_allocation", remote_name="dualStackDynamicIPAllocation", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="public", remote_name="public", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="subnet_vlanid", remote_name="subnetVLANID", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="multi_home_enabled", remote_name="multiHomeEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="multicast", remote_name="multicast", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
self.expose_attribute(local_name="customer_id", remote_name="customerID", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.patip_entries = NUPATIPEntriesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.tcas = NUTCAsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.address_ranges = NUAddressRangesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.default_gateways = NUDefaultGatewaysFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.deployment_failures = NUDeploymentFailuresFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vm_resyncs = NUVMResyncsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.bgp_neighbors = NUBGPNeighborsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.dhcp_options = NUDHCPOptionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.dhcpv6_options = NUDHCPv6OptionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.virtual_ips = NUVirtualIPsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.ike_gateway_connections = NUIKEGatewayConnectionsFetcher.fetcher_with_object(parent_object=self, relationship="member")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vms = NUVMsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vm_interfaces = NUVMInterfacesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vmip_reservations = NUVMIPReservationsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.enterprise_permissions = NUEnterprisePermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.containers = NUContainersFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.container_interfaces = NUContainerInterfacesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.container_resyncs = NUContainerResyncsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.qoss = NUQOSsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vports = NUVPortsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.ip_reservations = NUIPReservationsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.proxy_arp_filters = NUProxyARPFiltersFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.statistics = NUStatisticsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.statistics_policies = NUStatisticsPoliciesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
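    # Illustrative read of an existing subnet through the fetchers declared above.
    # This assumes an authenticated VSD session and the usual bambou fetcher API
    # (fetch()/get()); the ID below is a placeholder.
    #
    #     subnet = NUSubnet(id=u'xxxx-xxx-xxx-xxx')
    #     subnet.fetch()                 # populate attributes from VSD
    #     vports = subnet.vports.get()   # child VPorts via NUVPortsFetcher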
# Properties
@property
def l2_encap_type(self):
""" Get l2_encap_type value.
Notes:
Subnet Tunnel Type, possible values are MPLS, MPLSoUDP, VLAN and VXLAN.
This attribute is named `l2EncapType` in VSD API.
"""
return self._l2_encap_type
@l2_encap_type.setter
def l2_encap_type(self, value):
""" Set l2_encap_type value.
Notes:
Subnet Tunnel Type, possible values are MPLS, MPLSoUDP, VLAN and VXLAN.
This attribute is named `l2EncapType` in VSD API.
"""
self._l2_encap_type = value
@property
def pat_enabled(self):
""" Get pat_enabled value.
Notes:
                Indicates whether PAT is enabled for the subnets in this domain. Possible values are ENABLED, DISABLED, or INHERITED.
This attribute is named `PATEnabled` in VSD API.
"""
return self._pat_enabled
@pat_enabled.setter
def pat_enabled(self, value):
""" Set pat_enabled value.
Notes:
                Indicates whether PAT is enabled for the subnets in this domain. Possible values are ENABLED, DISABLED, or INHERITED.
This attribute is named `PATEnabled` in VSD API.
"""
self._pat_enabled = value
@property
def dhcp_relay_status(self):
""" Get dhcp_relay_status value.
Notes:
None
This attribute is named `DHCPRelayStatus` in VSD API.
"""
return self._dhcp_relay_status
@dhcp_relay_status.setter
def dhcp_relay_status(self, value):
""" Set dhcp_relay_status value.
Notes:
None
This attribute is named `DHCPRelayStatus` in VSD API.
"""
self._dhcp_relay_status = value
@property
def dpi(self):
""" Get dpi value.
Notes:
determines whether or not Deep packet inspection is enabled
This attribute is named `DPI` in VSD API.
"""
return self._dpi
@dpi.setter
def dpi(self, value):
""" Set dpi value.
Notes:
determines whether or not Deep packet inspection is enabled
This attribute is named `DPI` in VSD API.
"""
self._dpi = value
@property
def ip_type(self):
""" Get ip_type value.
Notes:
IPv4, DUALSTACK or IPv6
This attribute is named `IPType` in VSD API.
"""
return self._ip_type
@ip_type.setter
def ip_type(self, value):
""" Set ip_type value.
Notes:
IPv4, DUALSTACK or IPv6
This attribute is named `IPType` in VSD API.
"""
self._ip_type = value
@property
def ipv6_address(self):
""" Get ipv6_address value.
Notes:
                IP address of the subnet defined. In case of zone, this is an optional field that allows users to allocate an IP address range to a zone. The VSD will auto-assign IP addresses to subnets from this range if a specific IP address is not defined for the subnet
This attribute is named `IPv6Address` in VSD API.
"""
return self._ipv6_address
@ipv6_address.setter
def ipv6_address(self, value):
""" Set ipv6_address value.
Notes:
                IP address of the subnet defined. In case of zone, this is an optional field that allows users to allocate an IP address range to a zone. The VSD will auto-assign IP addresses to subnets from this range if a specific IP address is not defined for the subnet
This attribute is named `IPv6Address` in VSD API.
"""
self._ipv6_address = value
@property
def ipv6_gateway(self):
""" Get ipv6_gateway value.
Notes:
The IPv6 address of the gateway of this subnet
This attribute is named `IPv6Gateway` in VSD API.
"""
return self._ipv6_gateway
@ipv6_gateway.setter
def ipv6_gateway(self, value):
""" Set ipv6_gateway value.
Notes:
The IPv6 address of the gateway of this subnet
This attribute is named `IPv6Gateway` in VSD API.
"""
self._ipv6_gateway = value
@property
def evpn_enabled(self):
""" Get evpn_enabled value.
Notes:
Indicates if EVPN capabilities are enabled for this subnet.
This attribute is named `EVPNEnabled` in VSD API.
"""
return self._evpn_enabled
@evpn_enabled.setter
def evpn_enabled(self, value):
""" Set evpn_enabled value.
Notes:
Indicates if EVPN capabilities are enabled for this subnet.
This attribute is named `EVPNEnabled` in VSD API.
"""
self._evpn_enabled = value
@property
def maintenance_mode(self):
""" Get maintenance_mode value.
Notes:
maintenanceMode is an enum that indicates if the SubNetwork is accepting VM activation requests.
This attribute is named `maintenanceMode` in VSD API.
"""
return self._maintenance_mode
@maintenance_mode.setter
def maintenance_mode(self, value):
""" Set maintenance_mode value.
Notes:
maintenanceMode is an enum that indicates if the SubNetwork is accepting VM activation requests.
This attribute is named `maintenanceMode` in VSD API.
"""
self._maintenance_mode = value
@property
def name(self):
""" Get name value.
Notes:
Name of the current entity(Zone or zone template or subnet etc..) Valid characters are alphabets, numbers, space and hyphen( - ).
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the current entity(Zone or zone template or subnet etc..) Valid characters are alphabets, numbers, space and hyphen( - ).
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def gateway(self):
""" Get gateway value.
Notes:
The IP address of the gateway of this subnet
"""
return self._gateway
@gateway.setter
def gateway(self, value):
""" Set gateway value.
Notes:
The IP address of the gateway of this subnet
"""
self._gateway = value
@property
def gateway_mac_address(self):
""" Get gateway_mac_address value.
Notes:
None
This attribute is named `gatewayMACAddress` in VSD API.
"""
return self._gateway_mac_address
@gateway_mac_address.setter
def gateway_mac_address(self, value):
""" Set gateway_mac_address value.
Notes:
None
This attribute is named `gatewayMACAddress` in VSD API.
"""
self._gateway_mac_address = value
@property
def access_restriction_enabled(self):
""" Get access_restriction_enabled value.
Notes:
This attribute specifies whether subnet is enabled with access restrictions. Note: Applicable to shared infrastructure enterprise subnets.
This attribute is named `accessRestrictionEnabled` in VSD API.
"""
return self._access_restriction_enabled
@access_restriction_enabled.setter
def access_restriction_enabled(self, value):
""" Set access_restriction_enabled value.
Notes:
This attribute specifies whether subnet is enabled with access restrictions. Note: Applicable to shared infrastructure enterprise subnets.
This attribute is named `accessRestrictionEnabled` in VSD API.
"""
self._access_restriction_enabled = value
@property
def address(self):
""" Get address value.
Notes:
                IP address of the subnet defined. In case of zone, this is an optional field that allows users to allocate an IP address range to a zone. The VSD will auto-assign IP addresses to subnets from this range if a specific IP address is not defined for the subnet
"""
return self._address
@address.setter
def address(self, value):
""" Set address value.
Notes:
                IP address of the subnet defined. In case of zone, this is an optional field that allows users to allocate an IP address range to a zone. The VSD will auto-assign IP addresses to subnets from this range if a specific IP address is not defined for the subnet
"""
self._address = value
@property
def advertise(self):
""" Get advertise value.
Notes:
Subnet will be advertised in Overlay and WAN BGP
"""
return self._advertise
@advertise.setter
def advertise(self, value):
""" Set advertise value.
Notes:
Subnet will be advertised in Overlay and WAN BGP
"""
self._advertise = value
@property
def template_id(self):
""" Get template_id value.
Notes:
The ID of the subnet template that this subnet object was derived from
This attribute is named `templateID` in VSD API.
"""
return self._template_id
@template_id.setter
def template_id(self, value):
""" Set template_id value.
Notes:
The ID of the subnet template that this subnet object was derived from
This attribute is named `templateID` in VSD API.
"""
self._template_id = value
@property
def service_id(self):
""" Get service_id value.
Notes:
The service ID used by the VSCs to identify this subnet
This attribute is named `serviceID` in VSD API.
"""
return self._service_id
@service_id.setter
def service_id(self, value):
""" Set service_id value.
Notes:
The service ID used by the VSCs to identify this subnet
This attribute is named `serviceID` in VSD API.
"""
self._service_id = value
@property
def description(self):
""" Get description value.
Notes:
A description field provided by the user that identifies the subnet
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
A description field provided by the user that identifies the subnet
"""
self._description = value
@property
def resource_type(self):
""" Get resource_type value.
Notes:
Defines the type of the subnet, PUBLIC,FLOATING,STANDARD OR NSG_VNF
This attribute is named `resourceType` in VSD API.
"""
return self._resource_type
@resource_type.setter
def resource_type(self, value):
""" Set resource_type value.
Notes:
Defines the type of the subnet, PUBLIC,FLOATING,STANDARD OR NSG_VNF
This attribute is named `resourceType` in VSD API.
"""
self._resource_type = value
@property
def netmask(self):
""" Get netmask value.
Notes:
Netmask of the subnet defined
"""
return self._netmask
@netmask.setter
def netmask(self, value):
""" Set netmask value.
Notes:
Netmask of the subnet defined
"""
self._netmask = value
@property
def link_local_address(self):
""" Get link_local_address value.
Notes:
IPv6 unicast address. Valid range is fe80::/64.
This attribute is named `linkLocalAddress` in VSD API.
"""
return self._link_local_address
@link_local_address.setter
def link_local_address(self, value):
""" Set link_local_address value.
Notes:
IPv6 unicast address. Valid range is fe80::/64.
This attribute is named `linkLocalAddress` in VSD API.
"""
self._link_local_address = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum number of Metadata objects is returned, based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum number of Metadata objects is returned, based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def vn_id(self):
""" Get vn_id value.
Notes:
Current Network's globally unique VXLAN network identifier generated by VSD
This attribute is named `vnId` in VSD API.
"""
return self._vn_id
@vn_id.setter
def vn_id(self, value):
""" Set vn_id value.
Notes:
Current Network's globally unique VXLAN network identifier generated by VSD
This attribute is named `vnId` in VSD API.
"""
self._vn_id = value
@property
def enable_dhcpv4(self):
""" Get enable_dhcpv4 value.
Notes:
Turn on or off DHCP for of IPV4 Addresses
This attribute is named `enableDHCPv4` in VSD API.
"""
return self._enable_dhcpv4
@enable_dhcpv4.setter
def enable_dhcpv4(self, value):
""" Set enable_dhcpv4 value.
Notes:
Turn on or off DHCP for of IPV4 Addresses
This attribute is named `enableDHCPv4` in VSD API.
"""
self._enable_dhcpv4 = value
@property
def enable_dhcpv6(self):
""" Get enable_dhcpv6 value.
Notes:
Turn on or off DHCP for IPv6 Addresses
This attribute is named `enableDHCPv6` in VSD API.
"""
return self._enable_dhcpv6
@enable_dhcpv6.setter
def enable_dhcpv6(self, value):
""" Set enable_dhcpv6 value.
Notes:
Turn on or off DHCP for IPv6 Addresses
This attribute is named `enableDHCPv6` in VSD API.
"""
self._enable_dhcpv6 = value
@property
def encryption(self):
""" Get encryption value.
Notes:
Determines whether or not IPSEC is enabled.
"""
return self._encryption
@encryption.setter
def encryption(self, value):
""" Set encryption value.
Notes:
Determines whether or not IPSEC is enabled.
"""
self._encryption = value
@property
def underlay(self):
""" Get underlay value.
Notes:
Read Only Boolean flag to indicate whether underlay is enabled directly or indirectly
"""
return self._underlay
@underlay.setter
def underlay(self, value):
""" Set underlay value.
Notes:
Read Only Boolean flag to indicate whether underlay is enabled directly or indirectly
"""
self._underlay = value
@property
def underlay_enabled(self):
""" Get underlay_enabled value.
Notes:
Indicates whether UNDERLAY is enabled for the subnets in this domain
This attribute is named `underlayEnabled` in VSD API.
"""
return self._underlay_enabled
@underlay_enabled.setter
def underlay_enabled(self, value):
""" Set underlay_enabled value.
Notes:
Indicates whether UNDERLAY is enabled for the subnets in this domain
This attribute is named `underlayEnabled` in VSD API.
"""
self._underlay_enabled = value
@property
def ingress_replication_enabled(self):
""" Get ingress_replication_enabled value.
Notes:
Enables ingress replication for the VNI.
This attribute is named `ingressReplicationEnabled` in VSD API.
"""
return self._ingress_replication_enabled
@ingress_replication_enabled.setter
def ingress_replication_enabled(self, value):
""" Set ingress_replication_enabled value.
Notes:
Enables ingress replication for the VNI.
This attribute is named `ingressReplicationEnabled` in VSD API.
"""
self._ingress_replication_enabled = value
@property
def interface_id(self):
""" Get interface_id value.
Notes:
SRLinux Interface ID for Subnet configuration
This attribute is named `interfaceID` in VSD API.
"""
return self._interface_id
@interface_id.setter
def interface_id(self, value):
""" Set interface_id value.
Notes:
SRLinux Interface ID for Subnet configuration
This attribute is named `interfaceID` in VSD API.
"""
self._interface_id = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def entity_state(self):
""" Get entity_state value.
Notes:
Intermediate State of L2 Domain.
This attribute is named `entityState` in VSD API.
"""
return self._entity_state
@entity_state.setter
def entity_state(self, value):
""" Set entity_state value.
Notes:
Intermediate State of L2 Domain.
This attribute is named `entityState` in VSD API.
"""
self._entity_state = value
@property
def policy_group_id(self):
""" Get policy_group_id value.
Notes:
PG ID for the subnet. This is unique per domain and will be in the range 1-4095
This attribute is named `policyGroupID` in VSD API.
"""
return self._policy_group_id
@policy_group_id.setter
def policy_group_id(self, value):
""" Set policy_group_id value.
Notes:
PG ID for the subnet. This is unique per domain and will be in the range 1-4095
This attribute is named `policyGroupID` in VSD API.
"""
self._policy_group_id = value
@property
def color(self):
""" Get color value.
Notes:
The color encoded with a traffic engineering constraint such as minimum latency, hops, maximum bandwidth, etc. This is used for NFIX(Network Function Interconnect). Color is applicable only when the associated Domain's Tunnel Type is MPLSoUDP. Valid range is 1 - 4294967295. If 0 is provided, color will be derived from the associated Domain.
"""
return self._color
@color.setter
def color(self, value):
""" Set color value.
Notes:
The color encoded with a traffic engineering constraint such as minimum latency, hops, maximum bandwidth, etc. This is used for NFIX(Network Function Interconnect). Color is applicable only when the associated Domain's Tunnel Type is MPLSoUDP. Valid range is 1 - 4294967295. If 0 is provided, color will be derived from the associated Domain.
"""
self._color = value
@property
def domain_service_label(self):
""" Get domain_service_label value.
Notes:
Service ID or external label given to Domain
This attribute is named `domainServiceLabel` in VSD API.
"""
return self._domain_service_label
@domain_service_label.setter
def domain_service_label(self, value):
""" Set domain_service_label value.
Notes:
Service ID or external label given to Domain
This attribute is named `domainServiceLabel` in VSD API.
"""
self._domain_service_label = value
@property
def route_distinguisher(self):
""" Get route_distinguisher value.
Notes:
Route distinguisher for this subnet that is used by the BGP-EVPN protocol in VSC. Supported formats are: [2-byte ASN]:[4-byte value] or [4-byte ASN]:[2-byte value]
This attribute is named `routeDistinguisher` in VSD API.
"""
return self._route_distinguisher
@route_distinguisher.setter
def route_distinguisher(self, value):
""" Set route_distinguisher value.
Notes:
Route distinguisher for this subnet that is used by the BGP-EVPN protocol in VSC. Supported formats are: [2-byte ASN]:[4-byte value] or [4-byte ASN]:[2-byte value]
This attribute is named `routeDistinguisher` in VSD API.
"""
self._route_distinguisher = value
@property
def route_target(self):
""" Get route_target value.
Notes:
Route target for this subnet that is used by the BGP-EVPN protocol in VSC. Supported formats are: [2-byte ASN]:[4-byte value] or [4-byte ASN]:[2-byte value]
This attribute is named `routeTarget` in VSD API.
"""
return self._route_target
@route_target.setter
def route_target(self, value):
""" Set route_target value.
Notes:
Route target for this subnet that is used by the BGP-EVPN protocol in VSC. Supported formats are: [2-byte ASN]:[4-byte value] or [4-byte ASN]:[2-byte value]
This attribute is named `routeTarget` in VSD API.
"""
self._route_target = value
@property
def split_subnet(self):
""" Get split_subnet value.
Notes:
Block subnet routes
This attribute is named `splitSubnet` in VSD API.
"""
return self._split_subnet
@split_subnet.setter
def split_subnet(self, value):
""" Set split_subnet value.
Notes:
Block subnet routes
This attribute is named `splitSubnet` in VSD API.
"""
self._split_subnet = value
@property
def irb_sub_interface_id(self):
""" Get irb_sub_interface_id value.
Notes:
The IRB sub interface identifies subnet on SRLinux devices.
This attribute is named `irbSubInterfaceID` in VSD API.
"""
return self._irb_sub_interface_id
@irb_sub_interface_id.setter
def irb_sub_interface_id(self, value):
""" Set irb_sub_interface_id value.
Notes:
The IRB sub interface identifies subnet on SRLinux devices.
This attribute is named `irbSubInterfaceID` in VSD API.
"""
self._irb_sub_interface_id = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def proxy_arp(self):
""" Get proxy_arp value.
Notes:
When set, VRS will act as ARP Proxy
This attribute is named `proxyARP` in VSD API.
"""
return self._proxy_arp
@proxy_arp.setter
def proxy_arp(self, value):
""" Set proxy_arp value.
Notes:
When set, VRS will act as ARP Proxy
This attribute is named `proxyARP` in VSD API.
"""
self._proxy_arp = value
@property
def vrrp_ipv6_backup_address(self):
""" Get vrrp_ipv6_backup_address value.
Notes:
7x50 VRRP IPv6 Backup Address. Valid range is fe80::/64.
This attribute is named `vrrpIPv6BackupAddress` in VSD API.
"""
return self._vrrp_ipv6_backup_address
@vrrp_ipv6_backup_address.setter
def vrrp_ipv6_backup_address(self, value):
""" Set vrrp_ipv6_backup_address value.
Notes:
7x50 VRRP IPv6 Backup Address. Valid range is fe80::/64.
This attribute is named `vrrpIPv6BackupAddress` in VSD API.
"""
self._vrrp_ipv6_backup_address = value
@property
def use_global_mac(self):
""" Get use_global_mac value.
Notes:
if this flag is enabled, the system configured globalMACAddress will be used as the gateway mac address
This attribute is named `useGlobalMAC` in VSD API.
"""
return self._use_global_mac
@use_global_mac.setter
def use_global_mac(self, value):
""" Set use_global_mac value.
Notes:
if this flag is enabled, the system configured globalMACAddress will be used as the gateway mac address
This attribute is named `useGlobalMAC` in VSD API.
"""
self._use_global_mac = value
@property
def associated_multicast_channel_map_id(self):
""" Get associated_multicast_channel_map_id value.
Notes:
The ID of the Multi Cast Channel Map this Subnet/Subnet Template is associated with. This has to be set when enableMultiCast is set to ENABLED
This attribute is named `associatedMulticastChannelMapID` in VSD API.
"""
return self._associated_multicast_channel_map_id
@associated_multicast_channel_map_id.setter
def associated_multicast_channel_map_id(self, value):
""" Set associated_multicast_channel_map_id value.
Notes:
The ID of the Multi Cast Channel Map this Subnet/Subnet Template is associated with. This has to be set when enableMultiCast is set to ENABLED
This attribute is named `associatedMulticastChannelMapID` in VSD API.
"""
self._associated_multicast_channel_map_id = value
@property
def associated_shared_network_resource_id(self):
""" Get associated_shared_network_resource_id value.
Notes:
The ID of public subnet that is associated with this subnet
This attribute is named `associatedSharedNetworkResourceID` in VSD API.
"""
return self._associated_shared_network_resource_id
@associated_shared_network_resource_id.setter
def associated_shared_network_resource_id(self, value):
""" Set associated_shared_network_resource_id value.
Notes:
The ID of public subnet that is associated with this subnet
This attribute is named `associatedSharedNetworkResourceID` in VSD API.
"""
self._associated_shared_network_resource_id = value
@property
def dual_stack_dynamic_ip_allocation(self):
""" Get dual_stack_dynamic_ip_allocation value.
Notes:
This value indicates whether dynamic address allocation is enabled or not. This will be applicable when subnet is in dual stack mode.
This attribute is named `dualStackDynamicIPAllocation` in VSD API.
"""
return self._dual_stack_dynamic_ip_allocation
@dual_stack_dynamic_ip_allocation.setter
def dual_stack_dynamic_ip_allocation(self, value):
""" Set dual_stack_dynamic_ip_allocation value.
Notes:
This value indicates whether dynamic address allocation is enabled or not. This will be applicable when subnet is in dual stack mode.
This attribute is named `dualStackDynamicIPAllocation` in VSD API.
"""
self._dual_stack_dynamic_ip_allocation = value
@property
def public(self):
""" Get public value.
Notes:
when set to true means public subnet under a public zone
"""
return self._public
@public.setter
def public(self, value):
""" Set public value.
Notes:
when set to true means public subnet under a public zone
"""
self._public = value
@property
def subnet_vlanid(self):
""" Get subnet_vlanid value.
Notes:
Determines the VLANID for this associated Subnet.
This attribute is named `subnetVLANID` in VSD API.
"""
return self._subnet_vlanid
@subnet_vlanid.setter
def subnet_vlanid(self, value):
""" Set subnet_vlanid value.
Notes:
Determines the VLANID for this associated Subnet.
This attribute is named `subnetVLANID` in VSD API.
"""
self._subnet_vlanid = value
@property
def multi_home_enabled(self):
""" Get multi_home_enabled value.
Notes:
Boolean flag to indicate whether this is a Multi-homed subnet or not.
This attribute is named `multiHomeEnabled` in VSD API.
"""
return self._multi_home_enabled
@multi_home_enabled.setter
def multi_home_enabled(self, value):
""" Set multi_home_enabled value.
Notes:
Boolean flag to indicate whether this is a Multi-homed subnet or not.
This attribute is named `multiHomeEnabled` in VSD API.
"""
self._multi_home_enabled = value
@property
def multicast(self):
""" Get multicast value.
Notes:
multicast is enum that indicates multicast policy on Subnet/Subnet Template.
"""
return self._multicast
@multicast.setter
def multicast(self, value):
""" Set multicast value.
Notes:
multicast is enum that indicates multicast policy on Subnet/Subnet Template.
"""
self._multicast = value
@property
def customer_id(self):
""" Get customer_id value.
Notes:
CustomerID that is used by NETCONF MANAGER to identify this enterprise. This can be configured by root user.
This attribute is named `customerID` in VSD API.
"""
return self._customer_id
@customer_id.setter
def customer_id(self, value):
""" Set customer_id value.
Notes:
CustomerID that is used by NETCONF MANAGER to identify this enterprise. This can be configured by root user.
This attribute is named `customerID` in VSD API.
"""
self._customer_id = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
## Custom methods
def is_template(self):
""" Verify that the object is a template
Returns:
(bool): True if the object is a template
"""
return False
def is_from_template(self):
""" Verify if the object has been instantiated from a template
Note:
The object has to be fetched. Otherwise, it does not
have information from its parent
Returns:
                (bool): True if the object has been instantiated from a template
"""
return self.template_id
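    # Illustrative sketch (not part of the generated bindings; `zone`, the
    # attribute values and the exact class name are assumptions): the
    # snake_case properties above map onto the camelCase VSD API names noted
    # in their docstrings, e.g.
    #
    #   subnet = NUSubnet(name='web-tier', address='10.0.0.0',
    #                     netmask='255.255.255.0')
    #   subnet.enable_dhcpv4 = True      # serialized as `enableDHCPv4`
    #   subnet.gateway = '10.0.0.1'
    #   zone.create_child(subnet)        # typical bambou/vspk create pattern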
| bsd-3-clause | -5,173,366,400,604,724,000 | 30.167903 | 358 | 0.585667 | false | 4.479939 | false | false | false |
levelrf/level_basestation | level/examples/test_transmit.py | 1 | 3457 | #!/usr/bin/python
# python ~/workspace/level_basestation/pre-cog/examples/simple_trx.py --port 12345 --radio-addr 85 --dest-addr 86 --args serial=E8R10Z2B1
# python ~/workspace/level_basestation/pre-cog/examples/simple_trx.py --port 12346 --radio-addr 86 --dest-addr 85 --args serial=E4R11Y0B1
from gnuradio import gr
from gnuradio import uhd
from gnuradio import digital
from gnuradio import blks2
from gnuradio.gr import firdes
import gnuradio.gr.gr_threading as _threading
from gnuradio import level
from gnuradio import extras
from math import pi
from gruel import pmt
import urllib2, time, json
class test_transmit(gr.top_block):
def __init__(self):
gr.top_block.__init__(self, "CC430 Transmitter")
self.sent_pkts = 0
# 5555 5555 2c6e fd00 0071 da0b e2
self.packet = chr(0x55)*4 # preamble
self.packet += chr(0x2c) + chr(0x6e) # sync
self.packet += chr(0xfc) # length
self.packet += chr(0x00) + chr(0x00) + chr(0x00) # payload
self.packet += chr(0x71) + chr(0xda) + chr(0x0b) + chr(0xe2) # CRC (currently incorrect)
# Variables
self.samp_rate = samp_rate = 125e3
self.f_center = f_center = 868e6
self.bandwidth = bandwidth = 200e3
self.gain = gain = 5
self.msgq = msgq = gr.msg_queue()
# Blocks
self.uhd_sink = uhd.usrp_sink(
device_addr="",
stream_args=uhd.stream_args(
cpu_format="fc32",
channels=range(1),
),
)
self.uhd_sink.set_samp_rate(samp_rate)
self.uhd_sink.set_center_freq(f_center, 0)
self.uhd_sink.set_gain(gain, 0)
self.uhd_sink.set_bandwidth(bandwidth, 0)
self.msg_src = gr.message_source(1, msgq)
self.msk = level.msk_mod_bc(
samples_per_symbol=2,
bt=0.3
)
# Connections
self.connect(self.msg_src, self.msk, self.uhd_sink)
def send_pkt(self, payload):
msg = gr.message_from_string(str(payload))
self.msgq.insert_tail(msg)
def get_top_hn(self):
try:
f_page = urllib2.urlopen("http://api.ihackernews.com/page").read()
except urllib2.HTTPError:
return "HN returned server error: 0"
fj = json.loads(f_page)
title = fj['items'][0]['title']
score = fj['items'][0]['points']
return str(title) + ":" + str(score)
def form_packet(self, payload):
length = len(payload)
self.packet = chr(0x55)*4 # preamble
self.packet += chr(0xd3) + chr(0x91) # sync
self.packet += chr(length) # length
self.packet += str(payload)
        self.packet += chr(0x71) + chr(0xda) + chr(0x0b) + chr(0xe2) # CRC (currently incorrect)
        return self.packet # return the framed packet so main_loop's assignment works
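    # Worked example (illustrative only): for a 3-byte payload "abc",
    # form_packet() frames the bytes in this order:
    #   0x55 0x55 0x55 0x55    preamble
    #   0xd3 0x91              sync word
    #   0x03                   length = len("abc")
    #   'a' 'b' 'c'            payload
    #   0x71 0xda 0x0b 0xe2    placeholder CRC (not yet computed correctly)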
def main_loop(self):
while True:
payload = self.get_top_hn()
print payload
self.packet = self.form_packet(payload)
self.send_pkt(self.packet)
self.sent_pkts += 1
try:
time.sleep(5)
except KeyboardInterrupt:
print "\n\nSent Packets:", self.sent_pkts, "\n"
break
if __name__ == '__main__':
tx = test_transmit()
r = gr.enable_realtime_scheduling()
tx.start()
tx.main_loop()
| gpl-3.0 | -749,694,025,709,994,400 | 33.227723 | 137 | 0.558577 | false | 3.372683 | false | false | false |
dagar/Firmware | Tools/HIL/run_tests.py | 1 | 3547 | #! /usr/bin/python
import serial, time
import subprocess
from subprocess import call, Popen
from argparse import ArgumentParser
import re
def do_test(port, baudrate, test_name):
databits = serial.EIGHTBITS
stopbits = serial.STOPBITS_ONE
parity = serial.PARITY_NONE
ser = serial.Serial(port, baudrate, databits, parity, stopbits, timeout=10)
ser.write('\n\n')
finished = 0
success = False
timeout = 10 # 10 seconds
timeout_start = time.time()
while finished == 0:
serial_line = ser.readline()
print(serial_line.replace('\n',''))
if "nsh>" in serial_line:
finished = 1
if time.time() > timeout_start + timeout:
print("Error, timeout")
finished = 1
break
# run test
ser.write('tests ' + test_name + '\n')
time.sleep(0.05)
finished = 0
timeout = 300 # 5 minutes
timeout_start = time.time()
timeout_newline = time.time()
while finished == 0:
serial_line = ser.readline()
print(serial_line.replace('\n',''))
if test_name + " PASSED" in serial_line:
finished = 1
success = True
elif test_name + " FAILED" in serial_line:
finished = 1
success = False
if time.time() > timeout_start + timeout:
print("Error, timeout")
print(test_name + " FAILED")
finished = 1
success = False
break
# newline every 30 seconds if still running
if time.time() - timeout_newline > 30:
ser.write('\n')
timeout_newline = time.time()
ser.close()
return success
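# Example invocation (illustrative; the device path and baud rate are
# assumptions for a typical setup):
#
#   python run_tests.py --device /dev/ttyACM0 --baudrate 57600
#
# do_test() opens the serial port, waits for the "nsh>" prompt, sends
# "tests <name>" and returns True/False based on the PASSED/FAILED marker.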
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--device', "-d", nargs='?', default = None, help='')
parser.add_argument("--baudrate", "-b", dest="baudrate", type=int, help="Mavlink port baud rate (default=57600)", default=57600)
args = parser.parse_args()
do_test(args.device, args.baudrate, "autodeclination")
do_test(args.device, args.baudrate, "bezier")
do_test(args.device, args.baudrate, "bson")
do_test(args.device, args.baudrate, "commander")
do_test(args.device, args.baudrate, "controllib")
do_test(args.device, args.baudrate, "conv")
do_test(args.device, args.baudrate, "ctlmath")
#do_test(args.device, args.baudrate, "dataman")
do_test(args.device, args.baudrate, "float")
do_test(args.device, args.baudrate, "hrt")
do_test(args.device, args.baudrate, "int")
do_test(args.device, args.baudrate, "IntrusiveQueue")
do_test(args.device, args.baudrate, "List")
do_test(args.device, args.baudrate, "mathlib")
do_test(args.device, args.baudrate, "matrix")
do_test(args.device, args.baudrate, "microbench_hrt")
do_test(args.device, args.baudrate, "microbench_math")
do_test(args.device, args.baudrate, "microbench_matrix")
do_test(args.device, args.baudrate, "microbench_uorb")
#do_test(args.device, args.baudrate, "mixer")
do_test(args.device, args.baudrate, "param")
do_test(args.device, args.baudrate, "parameters")
do_test(args.device, args.baudrate, "perf")
do_test(args.device, args.baudrate, "search_min")
do_test(args.device, args.baudrate, "sleep")
do_test(args.device, args.baudrate, "smoothz")
do_test(args.device, args.baudrate, "time")
do_test(args.device, args.baudrate, "uorb")
do_test(args.device, args.baudrate, "versioning")
if __name__ == "__main__":
main()
| bsd-3-clause | -5,418,919,410,731,338,000 | 31.245455 | 132 | 0.620242 | false | 3.374881 | true | false | false |
chrischambers/django-calendartools | calendartools/views/agenda.py | 1 | 1141 | from datetime import date
from calendartools import defaults
from calendartools.views.calendars import (
YearView, TriMonthView, MonthView, WeekView, DayView
)
class YearAgenda(YearView):
template_name = 'calendar/agenda/year.html'
paginate_by = defaults.MAX_AGENDA_ITEMS_PER_PAGE
class MonthAgenda(MonthView):
template_name = 'calendar/agenda/month.html'
paginate_by = defaults.MAX_AGENDA_ITEMS_PER_PAGE
class TriMonthAgenda(TriMonthView):
template_name = 'calendar/agenda/tri_month.html'
paginate_by = defaults.MAX_AGENDA_ITEMS_PER_PAGE
class WeekAgenda(WeekView):
template_name = 'calendar/agenda/week.html'
paginate_by = defaults.MAX_AGENDA_ITEMS_PER_PAGE
class DayAgenda(DayView):
template_name = 'calendar/agenda/day.html'
paginate_by = defaults.MAX_AGENDA_ITEMS_PER_PAGE
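# Illustrative sketch (slug/year/month values are assumptions): these agenda
# views are instantiated and dispatched the same way as the calendar views
# they subclass, e.g.
#
#   view = MonthAgenda(request=request, slug=slug, year='2011', month='mar')
#   response = view.get(request, slug=slug)
#
# today_agenda() below applies the same pattern to the current day.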
def today_agenda(request, slug, *args, **kwargs):
today = date.today()
view = DayAgenda(request=request, slug=slug, year=str(today.year),
month=str(today.strftime('%b').lower()), day=str(today.day), **kwargs)
return view.get(request, slug=slug, year=today.year, day=today.day)
| bsd-3-clause | 3,052,311,570,141,808,600 | 30.694444 | 89 | 0.72305 | false | 3.117486 | false | false | false |
dzeban/batti-gtk | src/Notificator.py | 1 | 2868 |
'''
This file is part of batti, a battery monitor for the system tray.
Copyright (C) 2010 Arthur Spitzer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import dbus
from dbus.exceptions import DBusException
class Notificator:
def __init__(self):
try:
bus = dbus.SessionBus()
obj = bus.get_object('org.freedesktop.Notifications', '/org/freedesktop/Notifications')
self.__notify = dbus.Interface(obj, 'org.freedesktop.Notifications')
self.__last_id = dbus.UInt32(0)
self.__posx = -1
self.__posy = -1
self.__positioned = True
self.__duration = 3000
self.__name = 'batti'
self.__check_capabilities()
except DBusException:
self.__notify = None
self.__positioned = False
def __check_capabilities(self):
info = self.__notify.GetServerInformation()
if info[0] == 'notify-osd':
self.__positioned = False
def __show_positioned(self):
if self.__positioned:
return (self.__posx >= 0 and self.__posy >= 0)
else:
return False
def __show(self, icon, subject, msg, urgent):
if self.__notify is not None:
hints = {'urgency':dbus.Byte(urgent), 'desktop-entry':dbus.String('battery-monitor')}
if( self.__show_positioned() ):
hints['x'] = self.__posx
hints['y'] = self.__posy
self.__last_id = self.__notify.Notify(self.__name, self.__last_id, icon, subject, msg, [], hints, self.__duration)
def show(self, icon, subject, msg):
self.__show(icon, subject, msg, 1)
def show_urgent(self, icon, subject, msg):
self.__show(icon, subject, msg, 2)
def close(self):
if (self.__notify is not None) and self.__last_id:
self.__notify.CloseNotification(self.__last_id)
def setPosition(self, x, y):
self.__posx = x
self.__posy = y
def removePosition(self):
self.__posx = -1
self.__posy = -1
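    # Illustrative usage sketch (assumes a session bus with a running
    # notification daemon; icon and text values are made up):
    #
    #   n = Notificator()
    #   n.setDuration(5000)      # setDuration() below: keep bubbles up for 5 s
    #   n.setPosition(1200, 30)  # ignored when notify-osd is detected
    #   n.show('battery', 'Battery', 'Discharging, 42% remaining')
    #   n.close()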
def setDuration(self, milSec):
''' Set the duration on a notification with milSec milliseconds '''
self.__duration = milSec
| gpl-2.0 | 959,022,370,888,630,300 | 30.877778 | 126 | 0.589958 | false | 4.138528 | false | false | false |
DrGFreeman/RasPiBot202 | maze.py | 1 | 7175 | import networkx as nx
import math
class Maze:
def __init__(self):
self.g = nx.MultiGraph()
self.newNodeUid = 0
self.startNode = None
self.finishNode = None
self.distTol = 75 # Distance tolerance to consider two nodes to be the same
self.farAway = 10000 # A long distance...
def addNode(self, nbPathsOut = 0, start = False, finish = False,):
uid = self.getNewNodeUid()
# Create intersection node object
newNode = Node(uid, nbPathsOut, start, finish)
# If start, define object as maze start
if start:
self.setStartNode(newNode)
if finish:
self.setFinishNode(newNode)
# Create corresponding graph node
self.g.add_node(newNode)
# Return new node
return newNode
def addPath(self, fromNode, toNode, outHeading, inHeading, length):
newPath = Path(fromNode, toNode, outHeading, inHeading)
self.g.add_edge(fromNode, toNode, newPath, weight = length)
    def areNeighbors(self, node1, node2):
        # Return True only if node2 is a direct neighbor of node1 in the graph
        areNeighbors = False
        for neighbor in self.g.neighbors(node1):
            if neighbor == node2:
                areNeighbors = True
        return areNeighbors
# Method to be called when exploring the maze. It will create a new node at position x, y if it does
# not already exists. It will create a path object from the source node to the current node position.
def exploreNode(self, sourceNode, x, y, nbPathsOut, pathLength, outHeading, inHeading, start = False, finish = False):
# Check if already exists
if self.nodeExistsAtPos(x, y):
currentNode = self.getNodeAtPos(x, y)
print "Current node: ", currentNode.uid, " (existing)"
# Check if path loops back to sourceNode
if currentNode == sourceNode:
if currentNode.nbPathsOut <= 1:
currentNode.nbPathsOut = 0
else:
currentNode.nbPathsOut -= 1
print "Loop to self, reducing nbPathsOut for node ", currentNode.uid, " to ", currentNode.nbPathsOut
else:
# Create new node
currentNode = self.addNode(nbPathsOut, start, finish)
currentNode.setPos(x, y)
print "Current node: ", currentNode.uid, " (new)"
# Create path edge from sourceNode to node
self.addPath(sourceNode, currentNode, outHeading, inHeading, pathLength)
return currentNode
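    # Illustrative exploration step (coordinates, headings and path counts are
    # hypothetical): exploreNode() is called each time the robot reaches an
    # intersection while mapping.
    #
    #   maze = Maze()
    #   start = maze.addNode(nbPathsOut=2, start=True)
    #   start.setPos(0, 0)
    #   node = maze.exploreNode(start, x=500, y=0, nbPathsOut=3,
    #                           pathLength=500, outHeading='E', inHeading='E')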
def getHeadingToGoal(self, currentNode, goalNode):
nextNode = self.getNextNodeInShortestPath(currentNode, goalNode)
nextPath = self.getPathToNeighbor(currentNode, nextNode)
return nextPath.getHeadingToNode(nextNode)
def getNewNodeUid(self):
uid = self.newNodeUid
self.newNodeUid += 1
return uid
# Finds the nearest node from which there are unvisited paths
def getNearestUnvisited(self, currentNode):
shortestLength = self.farAway
for node in self.g.nodes():
if self.g.degree(node) < node.nbPathsOut + 1:
length = nx.shortest_path_length(self.g, currentNode, node, weight = 'weight')
print "Length to node ", node.uid, ": ", length
if length < shortestLength:
nearestUnvisited = node
shortestLength = length
print "Distance to nearest node with unvisited paths: ", shortestLength
return nearestUnvisited
def getNextNodeInShortestPath(self, currentNode, goalNode):
path = nx.shortest_path(self.g, currentNode, goalNode, weight = 'weight')
        if len(path) == 1:
return path[0]
else:
return path[1]
# Finds the next node in the path to the nearest node with unvisited paths
def getNextNodeToNearestUnvisited(self, currentNode):
nearestUnvisited = self.getNearestUnvisited(currentNode)
path = nx.shortest_path(self.g, currentNode, nearestUnvisited, weight = 'weight')
if len(path) == 1:
print "Next node with unvisited paths: ", path[0].uid, " (current node)"
return path[0]
else:
print "Next node with unvisited paths: ", path[1].uid
return path[1]
def getNodeAtPos(self, x, y):
for node in self.g.nodes():
if node.getDistance(x, y) < self.distTol:
return node
def getNodeByUid(self, uid):
for node in self.g.nodes():
if node.uid == uid:
return node
def getPathToNeighbor(self, currentNode, neighborNode):
paths = self.g[currentNode][neighborNode].items()
shortestLength = self.farAway
for path in paths:
if path[1]['weight'] < shortestLength:
shortestPath = path[0]
shortestLength = path[1]['weight']
return shortestPath
def hasUnvisitedPaths(self):
hasUnvisitedPaths = False
for node in self.g.nodes():
if self.g.degree(node) < node.nbPathsOut + 1:
hasUnvisitedPaths = True
return hasUnvisitedPaths
def headingIsUnvisited(self, currentNode, heading):
visitedHeadings = []
for node in self.g.neighbors(currentNode):
paths = self.g[currentNode][node].items()
for path in paths:
visitedHeadings.append(path[0].getHeadingToNode(node))
headingIsUnvisited = True
        if heading in visitedHeadings:
headingIsUnvisited = False
return headingIsUnvisited
def nodeExistsAtPos(self, x, y):
for node in self.g.nodes():
if node.getDistance(x, y) < self.distTol:
return True
def setFinishNode(self, node):
if self.finishNode is None:
self.finishNode = node
else:
print 'Error: Finish node already defined'
def setStartNode(self, node):
if self.startNode is None:
self.startNode = node
else:
print 'Error: Start node already defined'
class Node:
def __init__(self, uid, nbPathsOut, start = False, finish = False):
self.uid = uid
self.start = start
self.finish = finish
self.nbPathsOut = nbPathsOut
def getDistance(self, x, y):
return math.sqrt((self.x - x)**2 + (self.y - y)**2)
def setPos(self, x, y):
self.x = x
self.y = y
def getPos(self):
return self.x, self.y
class Path:
def __init__(self, nodeFrom, nodeTo, nodeFromOutHeading, nodeToInHeading):
self.node0 = nodeFrom
self.node0OutHeading = nodeFromOutHeading
self.node1 = nodeTo
self.node1OutHeading = self.inverseHeading(nodeToInHeading)
def inverseHeading(self, heading):
inHeading = ['E', 'N', 'W', 'S']
outHeading = ['W', 'S', 'E', 'N']
return outHeading[inHeading.index(heading)]
def getHeadingToNode(self, node):
if node == self.node0:
return self.node1OutHeading
elif node == self.node1:
return self.node0OutHeading
| mit | -6,924,944,892,534,273,000 | 35.794872 | 122 | 0.605714 | false | 3.986111 | true | false | false |
naototty/vagrant-lxc-ironic | ironic/drivers/modules/ipmitool.py | 1 | 42906 | # coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2014 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
IPMI power manager driver.
Uses the 'ipmitool' command (http://ipmitool.sourceforge.net/) to remotely
manage hardware. This includes setting the boot device, getting a
serial-over-LAN console, and controlling the power state of the machine.
NOTE THAT CERTAIN DISTROS MAY INSTALL openipmi BY DEFAULT, INSTEAD OF ipmitool,
WHICH PROVIDES DIFFERENT COMMAND-LINE OPTIONS AND *IS NOT SUPPORTED* BY THIS
DRIVER.
"""
import contextlib
import os
import re
import tempfile
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules import console_utils
from ironic.openstack.common import loopingcall
CONF = cfg.CONF
CONF.import_opt('retry_timeout',
'ironic.drivers.modules.ipminative',
group='ipmi')
CONF.import_opt('min_command_interval',
'ironic.drivers.modules.ipminative',
group='ipmi')
LOG = logging.getLogger(__name__)
VALID_PRIV_LEVELS = ['ADMINISTRATOR', 'CALLBACK', 'OPERATOR', 'USER']
REQUIRED_PROPERTIES = {
'ipmi_address': _("IP address or hostname of the node. Required.")
}
OPTIONAL_PROPERTIES = {
'ipmi_password': _("password. Optional."),
'ipmi_priv_level': _("privilege level; default is ADMINISTRATOR. One of "
"%s. Optional.") % ', '.join(VALID_PRIV_LEVELS),
'ipmi_username': _("username; default is NULL user. Optional."),
'ipmi_bridging': _("bridging_type; default is \"no\". One of \"single\", "
"\"dual\", \"no\". Optional."),
'ipmi_transit_channel': _("transit channel for bridged request. Required "
"only if ipmi_bridging is set to \"dual\"."),
'ipmi_transit_address': _("transit address for bridged request. Required "
"only if ipmi_bridging is set to \"dual\"."),
'ipmi_target_channel': _("destination channel for bridged request. "
"Required only if ipmi_bridging is set to "
"\"single\" or \"dual\"."),
'ipmi_target_address': _("destination address for bridged request. "
"Required only if ipmi_bridging is set "
"to \"single\" or \"dual\"."),
'ipmi_local_address': _("local IPMB address for bridged requests. "
"Used only if ipmi_bridging is set "
"to \"single\" or \"dual\". Optional.")
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
CONSOLE_PROPERTIES = {
'ipmi_terminal_port': _("node's UDP port to connect to. Only required for "
"console access.")
}
BRIDGING_OPTIONS = [('local_address', '-m'),
('transit_channel', '-B'), ('transit_address', '-T'),
('target_channel', '-b'), ('target_address', '-t')]
LAST_CMD_TIME = {}
TIMING_SUPPORT = None
SINGLE_BRIDGE_SUPPORT = None
DUAL_BRIDGE_SUPPORT = None
TMP_DIR_CHECKED = None
ipmitool_command_options = {
'timing': ['ipmitool', '-N', '0', '-R', '0', '-h'],
'single_bridge': ['ipmitool', '-m', '0', '-b', '0', '-t', '0', '-h'],
'dual_bridge': ['ipmitool', '-m', '0', '-b', '0', '-t', '0',
'-B', '0', '-T', '0', '-h']}
# Note(TheJulia): This string is hardcoded in ipmitool's lanplus driver
# and is substituted in return for the error code received from the IPMI
# controller. As of 1.8.15, no internationalization support appears to
# be in ipmitool which means the string should always be returned in this
# form regardless of locale.
IPMITOOL_RETRYABLE_FAILURES = ['insufficient resources for session']
def _check_option_support(options):
"""Checks if the specific ipmitool options are supported on host.
This method updates the module-level variables indicating whether
an option is supported so that it is accessible by any driver
interface class in this module. It is intended to be called from
the __init__ method of such classes only.
:param options: list of ipmitool options to be checked
:raises: OSError
"""
for opt in options:
if _is_option_supported(opt) is None:
try:
cmd = ipmitool_command_options[opt]
out, err = utils.execute(*cmd)
except processutils.ProcessExecutionError:
# the local ipmitool does not support the command.
_is_option_supported(opt, False)
else:
# looks like ipmitool supports the command.
_is_option_supported(opt, True)
def _is_option_supported(option, is_supported=None):
"""Indicates whether the particular ipmitool option is supported.
:param option: specific ipmitool option
:param is_supported: Optional Boolean. when specified, this value
is assigned to the module-level variable indicating
whether the option is supported. Used only if a value
is not already assigned.
:returns: True, indicates the option is supported
:returns: False, indicates the option is not supported
:returns: None, indicates that it is not aware whether the option
is supported
"""
global SINGLE_BRIDGE_SUPPORT
global DUAL_BRIDGE_SUPPORT
global TIMING_SUPPORT
if option == 'single_bridge':
if (SINGLE_BRIDGE_SUPPORT is None) and (is_supported is not None):
SINGLE_BRIDGE_SUPPORT = is_supported
return SINGLE_BRIDGE_SUPPORT
elif option == 'dual_bridge':
if (DUAL_BRIDGE_SUPPORT is None) and (is_supported is not None):
DUAL_BRIDGE_SUPPORT = is_supported
return DUAL_BRIDGE_SUPPORT
elif option == 'timing':
if (TIMING_SUPPORT is None) and (is_supported is not None):
TIMING_SUPPORT = is_supported
return TIMING_SUPPORT
def _console_pwfile_path(uuid):
"""Return the file path for storing the ipmi password for a console."""
file_name = "%(uuid)s.pw" % {'uuid': uuid}
return os.path.join(tempfile.gettempdir(), file_name)
@contextlib.contextmanager
def _make_password_file(password):
"""Makes a temporary file that contains the password.
:param password: the password
:returns: the absolute pathname of the temporary file
:raises: PasswordFileFailedToCreate from creating or writing to the
temporary file
"""
f = None
try:
f = tempfile.NamedTemporaryFile(mode='w', dir=CONF.tempdir)
f.write(str(password))
f.flush()
except (IOError, OSError) as exc:
if f is not None:
f.close()
raise exception.PasswordFileFailedToCreate(error=exc)
except Exception:
if f is not None:
f.close()
raise
try:
# NOTE(jlvillal): This yield can not be in the try/except block above
# because an exception by the caller of this function would then get
# changed to a PasswordFileFailedToCreate exception which would mislead
# about the problem and its cause.
yield f.name
finally:
if f is not None:
f.close()
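# Minimal usage sketch for the helper above (the password literal is
# hypothetical); this mirrors how _exec_ipmitool() consumes it:
#
#   with _make_password_file('secret') as pw_file:
#       cmd_args = ['ipmitool', '-f', pw_file, 'power', 'status']
#       # ... run the command while the file exists ...
#   # the temporary password file is closed and removed on exit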
def _parse_driver_info(node):
"""Gets the parameters required for ipmitool to access the node.
:param node: the Node of interest.
:returns: dictionary of parameters.
:raises: InvalidParameterValue when an invalid value is specified
:raises: MissingParameterValue when a required ipmi parameter is missing.
"""
info = node.driver_info or {}
bridging_types = ['single', 'dual']
missing_info = [key for key in REQUIRED_PROPERTIES if not info.get(key)]
if missing_info:
raise exception.MissingParameterValue(_(
"Missing the following IPMI credentials in node's"
" driver_info: %s.") % missing_info)
address = info.get('ipmi_address')
username = info.get('ipmi_username')
password = info.get('ipmi_password')
port = info.get('ipmi_terminal_port')
priv_level = info.get('ipmi_priv_level', 'ADMINISTRATOR')
bridging_type = info.get('ipmi_bridging', 'no')
local_address = info.get('ipmi_local_address')
transit_channel = info.get('ipmi_transit_channel')
transit_address = info.get('ipmi_transit_address')
target_channel = info.get('ipmi_target_channel')
target_address = info.get('ipmi_target_address')
if port:
try:
port = int(port)
except ValueError:
raise exception.InvalidParameterValue(_(
"IPMI terminal port is not an integer."))
# check if ipmi_bridging has proper value
if bridging_type == 'no':
# if bridging is not selected, then set all bridging params to None
(local_address, transit_channel, transit_address, target_channel,
target_address) = (None,) * 5
elif bridging_type in bridging_types:
# check if the particular bridging option is supported on host
if not _is_option_supported('%s_bridge' % bridging_type):
raise exception.InvalidParameterValue(_(
"Value for ipmi_bridging is provided as %s, but IPMI "
"bridging is not supported by the IPMI utility installed "
"on host. Ensure ipmitool version is > 1.8.11"
) % bridging_type)
# ensure that all the required parameters are provided
params_undefined = [param for param, value in [
("ipmi_target_channel", target_channel),
('ipmi_target_address', target_address)] if value is None]
if bridging_type == 'dual':
params_undefined2 = [param for param, value in [
("ipmi_transit_channel", transit_channel),
('ipmi_transit_address', transit_address)
] if value is None]
params_undefined.extend(params_undefined2)
else:
# if single bridging was selected, set dual bridge params to None
transit_channel = transit_address = None
# If the required parameters were not provided,
# raise an exception
if params_undefined:
raise exception.MissingParameterValue(_(
"%(param)s not provided") % {'param': params_undefined})
else:
raise exception.InvalidParameterValue(_(
"Invalid value for ipmi_bridging: %(bridging_type)s,"
" the valid value can be one of: %(bridging_types)s"
) % {'bridging_type': bridging_type,
'bridging_types': bridging_types + ['no']})
if priv_level not in VALID_PRIV_LEVELS:
valid_priv_lvls = ', '.join(VALID_PRIV_LEVELS)
raise exception.InvalidParameterValue(_(
"Invalid privilege level value:%(priv_level)s, the valid value"
" can be one of %(valid_levels)s") %
{'priv_level': priv_level, 'valid_levels': valid_priv_lvls})
return {
'address': address,
'username': username,
'password': password,
'port': port,
'uuid': node.uuid,
'priv_level': priv_level,
'local_address': local_address,
'transit_channel': transit_channel,
'transit_address': transit_address,
'target_channel': target_channel,
'target_address': target_address
}
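# Example of the node.driver_info this function parses (all values are
# illustrative only):
#
#   {
#       'ipmi_address': '192.0.2.10',
#       'ipmi_username': 'admin',
#       'ipmi_password': 'password',
#       'ipmi_priv_level': 'OPERATOR',
#       'ipmi_bridging': 'single',
#       'ipmi_target_channel': '0',
#       'ipmi_target_address': '0x82',
#   }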
def _exec_ipmitool(driver_info, command):
"""Execute the ipmitool command.
This uses the lanplus interface to communicate with the BMC device driver.
:param driver_info: the ipmitool parameters for accessing a node.
:param command: the ipmitool command to be executed.
:returns: (stdout, stderr) from executing the command.
:raises: PasswordFileFailedToCreate from creating or writing to the
temporary file.
:raises: processutils.ProcessExecutionError from executing the command.
"""
args = ['ipmitool',
'-I',
'lanplus',
'-H',
driver_info['address'],
'-L', driver_info['priv_level']
]
if driver_info['username']:
args.append('-U')
args.append(driver_info['username'])
for name, option in BRIDGING_OPTIONS:
if driver_info[name] is not None:
args.append(option)
args.append(driver_info[name])
# specify retry timing more precisely, if supported
num_tries = max(
(CONF.ipmi.retry_timeout // CONF.ipmi.min_command_interval), 1)
if _is_option_supported('timing'):
args.append('-R')
args.append(str(num_tries))
args.append('-N')
args.append(str(CONF.ipmi.min_command_interval))
end_time = (time.time() + CONF.ipmi.retry_timeout)
while True:
num_tries = num_tries - 1
# NOTE(deva): ensure that no communications are sent to a BMC more
# often than once every min_command_interval seconds.
time_till_next_poll = CONF.ipmi.min_command_interval - (
time.time() - LAST_CMD_TIME.get(driver_info['address'], 0))
if time_till_next_poll > 0:
time.sleep(time_till_next_poll)
# Resetting the list that will be utilized so the password arguments
# from any previous execution are preserved.
cmd_args = args[:]
# 'ipmitool' command will prompt password if there is no '-f'
# option, we set it to '\0' to write a password file to support
# empty password
with _make_password_file(
driver_info['password'] or '\0'
) as pw_file:
cmd_args.append('-f')
cmd_args.append(pw_file)
cmd_args.extend(command.split(" "))
try:
out, err = utils.execute(*cmd_args)
return out, err
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception() as ctxt:
err_list = [x for x in IPMITOOL_RETRYABLE_FAILURES
if x in e.args[0]]
if ((time.time() > end_time) or
(num_tries == 0) or
not err_list):
LOG.error(_LE('IPMI Error while attempting '
'"%(cmd)s" for node %(node)s. '
'Error: %(error)s'),
{
'node': driver_info['uuid'],
'cmd': e.cmd,
'error': e
})
else:
ctxt.reraise = False
LOG.warning(_LW('IPMI Error encountered, retrying '
'"%(cmd)s" for node %(node)s. '
'Error: %(error)s'),
{
'node': driver_info['uuid'],
'cmd': e.cmd,
'error': e
})
finally:
LAST_CMD_TIME[driver_info['address']] = time.time()
def _sleep_time(iter):
"""Return the time-to-sleep for the n'th iteration of a retry loop.
This implementation increases exponentially.
:param iter: iteration number
:returns: number of seconds to sleep
"""
if iter <= 1:
return 1
return iter ** 2
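# Worked example: iterations 0, 1, 2, 3, 4 sleep 1, 1, 4, 9, 16 seconds
# respectively, so retries back off quadratically until retry_timeout is hit.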
def _set_and_wait(target_state, driver_info):
"""Helper function for DynamicLoopingCall.
    This method changes the power state and polls the BMC until the desired
power state is reached, or CONF.ipmi.retry_timeout would be exceeded by the
next iteration.
This method assumes the caller knows the current power state and does not
check it prior to changing the power state. Most BMCs should be fine, but
if a driver is concerned, the state should be checked prior to calling this
method.
:param target_state: desired power state
:param driver_info: the ipmitool parameters for accessing a node.
:returns: one of ironic.common.states
"""
if target_state == states.POWER_ON:
state_name = "on"
elif target_state == states.POWER_OFF:
state_name = "off"
def _wait(mutable):
try:
# Only issue power change command once
if mutable['iter'] < 0:
_exec_ipmitool(driver_info, "power %s" % state_name)
else:
mutable['power'] = _power_status(driver_info)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError,
exception.IPMIFailure):
# Log failures but keep trying
LOG.warning(_LW("IPMI power %(state)s failed for node %(node)s."),
{'state': state_name, 'node': driver_info['uuid']})
finally:
mutable['iter'] += 1
if mutable['power'] == target_state:
raise loopingcall.LoopingCallDone()
sleep_time = _sleep_time(mutable['iter'])
if (sleep_time + mutable['total_time']) > CONF.ipmi.retry_timeout:
# Stop if the next loop would exceed maximum retry_timeout
LOG.error(_LE('IPMI power %(state)s timed out after '
'%(tries)s retries on node %(node_id)s.'),
{'state': state_name, 'tries': mutable['iter'],
'node_id': driver_info['uuid']})
mutable['power'] = states.ERROR
raise loopingcall.LoopingCallDone()
else:
mutable['total_time'] += sleep_time
return sleep_time
# Use mutable objects so the looped method can change them.
# Start 'iter' from -1 so that the first two checks are one second apart.
status = {'power': None, 'iter': -1, 'total_time': 0}
timer = loopingcall.DynamicLoopingCall(_wait, status)
timer.start().wait()
return status['power']
def _power_on(driver_info):
"""Turn the power ON for this node.
:param driver_info: the ipmitool parameters for accessing a node.
:returns: one of ironic.common.states POWER_ON or ERROR.
:raises: IPMIFailure on an error from ipmitool (from _power_status call).
"""
return _set_and_wait(states.POWER_ON, driver_info)
def _power_off(driver_info):
"""Turn the power OFF for this node.
:param driver_info: the ipmitool parameters for accessing a node.
:returns: one of ironic.common.states POWER_OFF or ERROR.
:raises: IPMIFailure on an error from ipmitool (from _power_status call).
"""
return _set_and_wait(states.POWER_OFF, driver_info)
def _power_status(driver_info):
"""Get the power status for a node.
:param driver_info: the ipmitool access parameters for a node.
:returns: one of ironic.common.states POWER_OFF, POWER_ON or ERROR.
:raises: IPMIFailure on an error from ipmitool.
"""
cmd = "power status"
try:
out_err = _exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.warning(_LW("IPMI power status failed for node %(node_id)s with "
"error: %(error)s."),
{'node_id': driver_info['uuid'], 'error': e})
raise exception.IPMIFailure(cmd=cmd)
if out_err[0] == "Chassis Power is on\n":
return states.POWER_ON
elif out_err[0] == "Chassis Power is off\n":
return states.POWER_OFF
else:
return states.ERROR
def _process_sensor(sensor_data):
sensor_data_fields = sensor_data.split('\n')
sensor_data_dict = {}
for field in sensor_data_fields:
if not field:
continue
kv_value = field.split(':')
if len(kv_value) != 2:
continue
sensor_data_dict[kv_value[0].strip()] = kv_value[1].strip()
return sensor_data_dict
def _get_sensor_type(node, sensor_data_dict):
# Have only three sensor type name IDs: 'Sensor Type (Analog)'
# 'Sensor Type (Discrete)' and 'Sensor Type (Threshold)'
for key in ('Sensor Type (Analog)', 'Sensor Type (Discrete)',
'Sensor Type (Threshold)'):
try:
return sensor_data_dict[key].split(' ', 1)[0]
except KeyError:
continue
raise exception.FailedToParseSensorData(
node=node.uuid,
        error=(_("parse ipmi sensor data failed, unknown sensor type"
                 " data: %(sensors_data)s") % {'sensors_data': sensor_data_dict}))
def _parse_ipmi_sensors_data(node, sensors_data):
"""Parse the IPMI sensors data and format to the dict grouping by type.
We run 'ipmitool' command with 'sdr -v' options, which can return sensor
details in human-readable format, we need to format them to JSON string
dict-based data for Ceilometer Collector which can be sent it as payload
out via notification bus and consumed by Ceilometer Collector.
:param sensors_data: the sensor data returned by ipmitool command.
:returns: the sensor data with JSON format, grouped by sensor type.
:raises: FailedToParseSensorData when error encountered during parsing.
"""
sensors_data_dict = {}
if not sensors_data:
return sensors_data_dict
sensors_data_array = sensors_data.split('\n\n')
for sensor_data in sensors_data_array:
sensor_data_dict = _process_sensor(sensor_data)
if not sensor_data_dict:
continue
sensor_type = _get_sensor_type(node, sensor_data_dict)
# ignore the sensors which has no current 'Sensor Reading' data
if 'Sensor Reading' in sensor_data_dict:
sensors_data_dict.setdefault(sensor_type,
{})[sensor_data_dict['Sensor ID']] = sensor_data_dict
# get nothing, no valid sensor data
if not sensors_data_dict:
raise exception.FailedToParseSensorData(
node=node.uuid,
error=(_("parse ipmi sensor data failed, get nothing with input"
" data: %(sensors_data)s") % {'sensors_data': sensors_data}))
return sensors_data_dict
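# Illustrative input/output for the parser above (sensor names and readings
# are made up). An 'sdr -v' block such as:
#
#       Sensor ID              : Temp (0x1)
#       Sensor Type (Threshold): Temperature
#       Sensor Reading         : 42 (+/- 1) degrees C
#
# is grouped by sensor type and keyed by sensor ID:
#
#       {'Temperature': {'Temp (0x1)': {'Sensor ID': 'Temp (0x1)',
#                                       'Sensor Type (Threshold)': 'Temperature',
#                                       'Sensor Reading': '42 (+/- 1) degrees C'}}}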
@task_manager.require_exclusive_lock
def send_raw(task, raw_bytes):
"""Send raw bytes to the BMC. Bytes should be a string of bytes.
:param task: a TaskManager instance.
:param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:raises: InvalidParameterValue when an invalid value is specified.
"""
node_uuid = task.node.uuid
LOG.debug('Sending node %(node)s raw bytes %(bytes)s',
{'bytes': raw_bytes, 'node': node_uuid})
driver_info = _parse_driver_info(task.node)
cmd = 'raw %s' % raw_bytes
try:
out, err = _exec_ipmitool(driver_info, cmd)
LOG.debug('send raw bytes returned stdout: %(stdout)s, stderr:'
' %(stderr)s', {'stdout': out, 'stderr': err})
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.exception(_LE('IPMI "raw bytes" failed for node %(node_id)s '
'with error: %(error)s.'),
{'node_id': node_uuid, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
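# Example usage (assuming `task` is a TaskManager instance obtained elsewhere):
#   send_raw(task, '0x00 0x08 0x03 0x08')
# which executes "ipmitool ... raw 0x00 0x08 0x03 0x08" against the node's BMC.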
def _check_temp_dir():
"""Check for Valid temp directory."""
global TMP_DIR_CHECKED
# because a temporary file is used to pass the password to ipmitool,
# we should check the directory
if TMP_DIR_CHECKED is None:
try:
utils.check_dir()
except (exception.PathNotFound,
exception.DirectoryNotWritable,
exception.InsufficientDiskSpace) as e:
TMP_DIR_CHECKED = False
err_msg = (_("Ipmitool drivers need to be able to create "
"temporary files to pass password to ipmitool. "
"Encountered error: %s") % e)
e.message = err_msg
LOG.error(err_msg)
raise
else:
TMP_DIR_CHECKED = True
class IPMIPower(base.PowerInterface):
def __init__(self):
try:
_check_option_support(['timing', 'single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task):
"""Validate driver_info for ipmitool driver.
Check that node['driver_info'] contains IPMI credentials.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue if required ipmi parameters are missing.
:raises: MissingParameterValue if a required parameter is missing.
"""
_parse_driver_info(task.node)
# NOTE(deva): don't actually touch the BMC in validate because it is
# called too often, and BMCs are too fragile.
# This is a temporary measure to mitigate problems while
# 1314954 and 1314961 are resolved.
def get_power_state(self, task):
"""Get the current power state of the task's node.
:param task: a TaskManager instance containing the node to act on.
:returns: one of ironic.common.states POWER_OFF, POWER_ON or ERROR.
:raises: InvalidParameterValue if required ipmi parameters are missing.
:raises: MissingParameterValue if a required parameter is missing.
:raises: IPMIFailure on an error from ipmitool (from _power_status
call).
"""
driver_info = _parse_driver_info(task.node)
return _power_status(driver_info)
@task_manager.require_exclusive_lock
def set_power_state(self, task, pstate):
"""Turn the power on or off.
:param task: a TaskManager instance containing the node to act on.
:param pstate: The desired power state, one of ironic.common.states
POWER_ON, POWER_OFF.
:raises: InvalidParameterValue if an invalid power state was specified.
:raises: MissingParameterValue if required ipmi parameters are missing
:raises: PowerStateFailure if the power couldn't be set to pstate.
"""
driver_info = _parse_driver_info(task.node)
if pstate == states.POWER_ON:
state = _power_on(driver_info)
elif pstate == states.POWER_OFF:
state = _power_off(driver_info)
else:
raise exception.InvalidParameterValue(_("set_power_state called "
"with invalid power state %s.") % pstate)
if state != pstate:
raise exception.PowerStateFailure(pstate=pstate)
@task_manager.require_exclusive_lock
def reboot(self, task):
"""Cycles the power to the task's node.
:param task: a TaskManager instance containing the node to act on.
:raises: MissingParameterValue if required ipmi parameters are missing.
:raises: InvalidParameterValue if an invalid power state was specified.
:raises: PowerStateFailure if the final state of the node is not
POWER_ON.
"""
driver_info = _parse_driver_info(task.node)
_power_off(driver_info)
state = _power_on(driver_info)
if state != states.POWER_ON:
raise exception.PowerStateFailure(pstate=states.POWER_ON)
class IPMIManagement(base.ManagementInterface):
def get_properties(self):
return COMMON_PROPERTIES
def __init__(self):
try:
_check_option_support(['timing', 'single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()
def validate(self, task):
"""Check that 'driver_info' contains IPMI credentials.
Validates whether the 'driver_info' property of the supplied
task's node contains the required credentials information.
:param task: a task from TaskManager.
:raises: InvalidParameterValue if required IPMI parameters
are missing.
:raises: MissingParameterValue if a required parameter is missing.
"""
_parse_driver_info(task.node)
def get_supported_boot_devices(self):
"""Get a list of the supported boot devices.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`.
"""
return [boot_devices.PXE, boot_devices.DISK, boot_devices.CDROM,
boot_devices.BIOS, boot_devices.SAFE]
@task_manager.require_exclusive_lock
def set_boot_device(self, task, device, persistent=False):
"""Set the boot device for the task's node.
Set the boot device to use on next reboot of the node.
:param task: a task from TaskManager.
:param device: the boot device, one of
:mod:`ironic.common.boot_devices`.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
:raises: InvalidParameterValue if an invalid boot device is specified
:raises: MissingParameterValue if required ipmi parameters are missing.
:raises: IPMIFailure on an error from ipmitool.
"""
if device not in self.get_supported_boot_devices():
raise exception.InvalidParameterValue(_(
"Invalid boot device %s specified.") % device)
# note(JayF): IPMI spec indicates unless you send these raw bytes the
# boot device setting times out after 60s. Since it's possible it
# could be >60s before a node is rebooted, we should always send them.
# This mimics pyghmi's current behavior, and the "option=timeout"
# setting on newer ipmitool binaries.
timeout_disable = "0x00 0x08 0x03 0x08"
send_raw(task, timeout_disable)
cmd = "chassis bootdev %s" % device
if persistent:
cmd = cmd + " options=persistent"
driver_info = _parse_driver_info(task.node)
try:
out, err = _exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.warning(_LW('IPMI set boot device failed for node %(node)s '
'when executing "ipmitool %(cmd)s". '
'Error: %(error)s'),
{'node': driver_info['uuid'], 'cmd': cmd, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
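    # For instance, set_boot_device(task, boot_devices.PXE, persistent=True)
    # would run "ipmitool ... chassis bootdev pxe options=persistent" (assuming
    # boot_devices.PXE maps to the string 'pxe'), after first sending the raw
    # timeout-disable bytes above.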
def get_boot_device(self, task):
"""Get the current boot device for the task's node.
Returns the current boot device of the node.
:param task: a task from TaskManager.
:raises: InvalidParameterValue if required IPMI parameters
are missing.
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:returns: a dictionary containing:
:boot_device: the boot device, one of
:mod:`ironic.common.boot_devices` or None if it is unknown.
:persistent: Whether the boot device will persist to all
future boots or not, None if it is unknown.
"""
cmd = "chassis bootparam get 5"
driver_info = _parse_driver_info(task.node)
response = {'boot_device': None, 'persistent': None}
try:
out, err = _exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.warning(_LW('IPMI get boot device failed for node %(node)s '
'when executing "ipmitool %(cmd)s". '
'Error: %(error)s'),
{'node': driver_info['uuid'], 'cmd': cmd, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
re_obj = re.search('Boot Device Selector : (.+)?\n', out)
if re_obj:
boot_selector = re_obj.groups('')[0]
if 'PXE' in boot_selector:
response['boot_device'] = boot_devices.PXE
elif 'Hard-Drive' in boot_selector:
if 'Safe-Mode' in boot_selector:
response['boot_device'] = boot_devices.SAFE
else:
response['boot_device'] = boot_devices.DISK
elif 'BIOS' in boot_selector:
response['boot_device'] = boot_devices.BIOS
elif 'CD/DVD' in boot_selector:
response['boot_device'] = boot_devices.CDROM
response['persistent'] = 'Options apply to all future boots' in out
return response
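    # A successful call returns something like (illustrative values):
    #   {'boot_device': boot_devices.PXE, 'persistent': False}
    # and {'boot_device': None, 'persistent': None} when the selector is unknown.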
def get_sensors_data(self, task):
"""Get sensors data.
:param task: a TaskManager instance.
:raises: FailedToGetSensorData when getting the sensor data fails.
:raises: FailedToParseSensorData when parsing sensor data fails.
:raises: InvalidParameterValue if required ipmi parameters are missing
:raises: MissingParameterValue if a required parameter is missing.
:returns: returns a dict of sensor data group by sensor type.
"""
driver_info = _parse_driver_info(task.node)
        # with the '-v' option, we can get the entire sensor data including
        # the extended sensor information
cmd = "sdr -v"
try:
out, err = _exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
raise exception.FailedToGetSensorData(node=task.node.uuid,
error=e)
return _parse_ipmi_sensors_data(task.node, out)
class VendorPassthru(base.VendorInterface):
def __init__(self):
try:
_check_option_support(['single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()
@base.passthru(['POST'])
@task_manager.require_exclusive_lock
def send_raw(self, task, http_method, raw_bytes):
"""Send raw bytes to the BMC. Bytes should be a string of bytes.
:param task: a TaskManager instance.
:param http_method: the HTTP method used on the request.
:param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:raises: InvalidParameterValue when an invalid value is specified.
"""
send_raw(task, raw_bytes)
@base.passthru(['POST'])
@task_manager.require_exclusive_lock
def bmc_reset(self, task, http_method, warm=True):
"""Reset BMC with IPMI command 'bmc reset (warm|cold)'.
:param task: a TaskManager instance.
:param http_method: the HTTP method used on the request.
:param warm: boolean parameter to decide on warm or cold reset.
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:raises: InvalidParameterValue when an invalid value is specified
"""
node_uuid = task.node.uuid
if warm:
warm_param = 'warm'
else:
warm_param = 'cold'
LOG.debug('Doing %(warm)s BMC reset on node %(node)s',
{'warm': warm_param, 'node': node_uuid})
driver_info = _parse_driver_info(task.node)
cmd = 'bmc reset %s' % warm_param
try:
out, err = _exec_ipmitool(driver_info, cmd)
LOG.debug('bmc reset returned stdout: %(stdout)s, stderr:'
' %(stderr)s', {'stdout': out, 'stderr': err})
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.exception(_LE('IPMI "bmc reset" failed for node %(node_id)s '
'with error: %(error)s.'),
{'node_id': node_uuid, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task, method, **kwargs):
"""Validate vendor-specific actions.
If invalid, raises an exception; otherwise returns None.
Valid methods:
* send_raw
* bmc_reset
:param task: a task from TaskManager.
:param method: method to be validated
:param kwargs: info for action.
:raises: InvalidParameterValue when an invalid parameter value is
specified.
:raises: MissingParameterValue if a required parameter is missing.
"""
if method == 'send_raw':
if not kwargs.get('raw_bytes'):
raise exception.MissingParameterValue(_(
'Parameter raw_bytes (string of bytes) was not '
'specified.'))
_parse_driver_info(task.node)
class IPMIShellinaboxConsole(base.ConsoleInterface):
"""A ConsoleInterface that uses ipmitool and shellinabox."""
def __init__(self):
try:
_check_option_support(['timing', 'single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()
def get_properties(self):
d = COMMON_PROPERTIES.copy()
d.update(CONSOLE_PROPERTIES)
return d
def validate(self, task):
"""Validate the Node console info.
:param task: a task from TaskManager.
:raises: InvalidParameterValue
:raises: MissingParameterValue when a required parameter is missing
"""
driver_info = _parse_driver_info(task.node)
if not driver_info['port']:
raise exception.MissingParameterValue(_(
"Missing 'ipmi_terminal_port' parameter in node's"
" driver_info."))
def start_console(self, task):
"""Start a remote console for the node.
:param task: a task from TaskManager
:raises: InvalidParameterValue if required ipmi parameters are missing
:raises: PasswordFileFailedToCreate if unable to create a file
containing the password
:raises: ConsoleError if the directory for the PID file cannot be
created
:raises: ConsoleSubprocessFailed when invoking the subprocess failed
"""
driver_info = _parse_driver_info(task.node)
path = _console_pwfile_path(driver_info['uuid'])
pw_file = console_utils.make_persistent_password_file(
path, driver_info['password'])
ipmi_cmd = ("/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s"
" -I lanplus -U %(user)s -f %(pwfile)s"
% {'uid': os.getuid(),
'gid': os.getgid(),
'address': driver_info['address'],
'user': driver_info['username'],
'pwfile': pw_file})
for name, option in BRIDGING_OPTIONS:
if driver_info[name] is not None:
ipmi_cmd = " ".join([ipmi_cmd,
option, driver_info[name]])
if CONF.debug:
ipmi_cmd += " -v"
ipmi_cmd += " sol activate"
try:
console_utils.start_shellinabox_console(driver_info['uuid'],
driver_info['port'],
ipmi_cmd)
except (exception.ConsoleError, exception.ConsoleSubprocessFailed):
with excutils.save_and_reraise_exception():
utils.unlink_without_raise(path)
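    # The assembled command looks roughly like (placeholder values):
    #   /:1000:1000:HOME:ipmitool -H 10.0.0.5 -I lanplus -U admin -f <pwfile> sol activate
    # and is handed to shellinabox on the node's configured terminal port.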
def stop_console(self, task):
"""Stop the remote console session for the node.
:param task: a task from TaskManager
:raises: InvalidParameterValue if required ipmi parameters are missing
:raises: ConsoleError if unable to stop the console
"""
driver_info = _parse_driver_info(task.node)
try:
console_utils.stop_shellinabox_console(driver_info['uuid'])
finally:
utils.unlink_without_raise(
_console_pwfile_path(driver_info['uuid']))
def get_console(self, task):
"""Get the type and connection information about the console."""
driver_info = _parse_driver_info(task.node)
url = console_utils.get_shellinabox_console_url(driver_info['port'])
return {'type': 'shellinabox', 'url': url}
| apache-2.0 | -437,569,069,582,017,540 | 38.617729 | 79 | 0.600289 | false | 4.156753 | false | false | false |
Ictp/indico | indico/MaKaC/user.py | 1 | 59857 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict
import operator
from BTrees.OOBTree import OOTreeSet, union
from persistent import Persistent
from accessControl import AdminList, AccessWrapper
import MaKaC
from MaKaC.common import filters, indexes
from MaKaC.common.cache import GenericCache
from MaKaC.common.Locators import Locator
from MaKaC.common.ObjectHolders import ObjectHolder
from MaKaC.errors import UserError, MaKaCError
from MaKaC.trashCan import TrashCanManager
import MaKaC.common.info as info
from MaKaC.i18n import _
from MaKaC.authentication.AuthenticationMgr import AuthenticatorMgr
from MaKaC.common.logger import Logger
from MaKaC.fossils.user import IAvatarFossil, IAvatarAllDetailsFossil,\
IGroupFossil, IPersonalInfoFossil, IAvatarMinimalFossil
from MaKaC.common.fossilize import Fossilizable, fossilizes
from pytz import all_timezones
from MaKaC.plugins.base import PluginsHolder
from indico.util.caching import order_dict
from indico.util.decorators import cached_classproperty
from indico.util.event import truncate_path
from indico.util.redis import write_client as redis_write_client
from indico.util.redis import avatar_links, suggestions
from flask import request
"""Contains the classes that implement the user management subsystem
"""
class Group(Persistent, Fossilizable):
fossilizes(IGroupFossil)
"""
"""
groupType = "Default"
def __init__(self, groupData=None):
self.id = ""
self.name = ""
self.description = ""
self.email = ""
self.members = []
self.obsolete = False
def __cmp__(self, other):
if type(self) is not type(other):
# This is actually dangerous and the ZODB manual says not to do this
# because it relies on memory order. However, this branch should never
# be taken anyway since we do not store different types in the same set
# or use them as keys.
return cmp(hash(self), hash(other))
return cmp(self.getId(), other.getId())
def setId(self, newId):
self.id = str(newId)
def getId(self):
return self.id
def setName(self, newName):
self.name = newName.strip()
GroupHolder().notifyGroupNameChange(self)
def getName(self):
return self.name
getFullName = getName
def setDescription(self, newDesc):
self.description = newDesc.strip()
def getDescription(self):
return self.description
def setEmail(self, newEmail):
self.email = newEmail.strip()
def getEmail(self):
try:
return self.email
except:
self.email = ""
return self.email
def isObsolete(self):
if not hasattr(self, "obsolete"):
self.obsolete = False
return self.obsolete
def setObsolete(self, obsolete):
self.obsolete = obsolete
def addMember(self, newMember):
if newMember == self:
raise MaKaCError(_("It is not possible to add a group as member of itself"))
if self.containsMember(newMember) or newMember.containsMember(self):
return
self.members.append(newMember)
if isinstance(newMember, Avatar):
newMember.linkTo(self, "member")
self._p_changed = 1
def removeMember(self, member):
if member == None or member not in self.members:
return
self.members.remove(member)
if isinstance(member, Avatar):
member.unlinkTo(self, "member")
self._p_changed = 1
def getMemberList(self):
return self.members
def _containsUser(self, avatar):
if avatar == None:
return 0
for member in self.members:
if member.containsUser(avatar):
return 1
return 0
def containsUser(self, avatar):
group_membership = GenericCache('groupmembership')
if avatar is None:
return False
key = "{0}-{1}".format(self.getId(), avatar.getId())
user_in_group = group_membership.get(key)
if user_in_group is None:
user_in_group = self._containsUser(avatar)
group_membership.set(key, user_in_group, time=1800)
return user_in_group
def containsMember(self, member):
if member == None:
return 0
if member in self.members:
return 1
for m in self.members:
try:
if m.containsMember(member):
return 1
except AttributeError, e:
continue
return 0
def canModify(self, aw):
return self.canUserModify(aw.getUser())
def canUserModify(self, user):
return self.containsMember(user) or \
(user in AdminList.getInstance().getList())
def getLocator(self):
d = Locator()
d["groupId"] = self.getId()
return d
def exists(self):
return True
class _GroupFFName(filters.FilterField):
_id="name"
def satisfies(self,group):
for value in self._values:
if value.strip() != "":
if value.strip() == "*":
return True
if str(group.getName()).lower().find((str(value).strip().lower()))!=-1:
return True
return False
class _GroupFilterCriteria(filters.FilterCriteria):
_availableFields={"name":_GroupFFName}
def __init__(self,criteria={}):
filters.FilterCriteria.__init__(self,None,criteria)
class GroupHolder(ObjectHolder):
"""
"""
idxName = "groups"
counterName = "PRINCIPAL"
def add(self, group):
ObjectHolder.add(self, group)
self.getIndex().indexGroup(group)
def remove(self, group):
ObjectHolder.remove(self, group)
self.getIndex().unindexGroup(group)
def notifyGroupNameChange(self, group):
self.getIndex().unindexGroup(group)
self.getIndex().indexGroup(group)
def getIndex(self):
index = indexes.IndexesHolder().getById("group")
if index.getLength() == 0:
self._reIndex(index)
return index
def _reIndex(self, index):
for group in self.getList():
index.indexGroup(group)
def getBrowseIndex(self):
return self.getIndex().getBrowseIndex()
def getLength(self):
return self.getIndex().getLength()
def matchFirstLetter(self, letter, searchInAuthenticators=True):
result = []
index = self.getIndex()
if searchInAuthenticators:
self._updateGroupMatchFirstLetter(letter)
match = index.matchFirstLetter(letter)
if match != None:
for groupid in match:
if groupid != "":
if self.getById(groupid) not in result:
gr=self.getById(groupid)
result.append(gr)
return result
def match(self, criteria, searchInAuthenticators=True, exact=False):
crit={}
result = []
for f,v in criteria.items():
crit[f]=[v]
if crit.has_key("groupname"):
crit["name"] = crit["groupname"]
if searchInAuthenticators:
self._updateGroupMatch(crit["name"][0],exact)
match = self.getIndex().matchGroup(crit["name"][0], exact=exact)
if match != None:
for groupid in match:
gr = self.getById(groupid)
if gr not in result:
result.append(gr)
return result
def update(self, group):
if self.hasKey(group.getId()):
current_group = self.getById(group.getId())
current_group.setDescription(group.getDescription())
def _updateGroupMatch(self, name, exact=False):
for auth in AuthenticatorMgr().getList():
for group in auth.matchGroup(name, exact):
if not self.hasKey(group.getId()):
self.add(group)
else:
self.update(group)
def _updateGroupMatchFirstLetter(self, letter):
for auth in AuthenticatorMgr().getList():
for group in auth.matchGroupFirstLetter(letter):
if not self.hasKey(group.getId()):
self.add(group)
else:
self.update(group)
class Avatar(Persistent, Fossilizable):
"""This class implements the representation of users inside the system.
Basically it contains personal data from them which is relevant for the
system.
"""
fossilizes(IAvatarFossil, IAvatarAllDetailsFossil, IAvatarMinimalFossil)
# When this class is defined MaKaC.conference etc. are not available yet
@cached_classproperty
@classmethod
def linkedToMap(cls):
from MaKaC.common.timerExec import Alarm
# Hey, when adding new roles don't forget to handle them in AvatarHolder.mergeAvatar, too!
return {
'category': {'cls': MaKaC.conference.Category,
'roles': set(['access', 'creator', 'favorite', 'manager'])},
'conference': {'cls': MaKaC.conference.Conference,
'roles': set(['abstractSubmitter', 'access', 'chair', 'creator', 'editor', 'manager',
'paperReviewManager', 'participant', 'referee', 'registrar', 'reviewer'])},
'session': {'cls': MaKaC.conference.Session,
'roles': set(['access', 'coordinator', 'manager'])},
'contribution': {'cls': MaKaC.conference.Contribution,
'roles': set(['access', 'editor', 'manager', 'referee', 'reviewer', 'submission'])},
'track': {'cls': MaKaC.conference.Track,
'roles': set(['coordinator'])},
'material': {'cls': MaKaC.conference.Material,
'roles': set(['access'])},
'resource': {'cls': MaKaC.conference.Resource,
'roles': set(['access'])},
'abstract': {'cls': MaKaC.review.Abstract,
'roles': set(['submitter'])},
'registration': {'cls': MaKaC.registration.Registrant,
'roles': set(['registrant'])},
'group': {'cls': MaKaC.user.Group,
'roles': set(['member'])},
'evaluation': {'cls': MaKaC.evaluation.Submission,
'roles': set(['submitter'])},
'alarm': {'cls': Alarm,
'roles': set(['to'])}
}
def __init__(self, userData=None):
"""Class constructor.
Attributes:
userData -- dictionary containing user data to map into the
avatar. Possible key values (those with * are
multiple):
                                name, surname, title, organisation*, address*,
email*, telephone*, fax*
"""
self.id = ""
self.personId = None
self.name = ""
self.surName = ""
self.title = ""
self.organisation = [""]
self.address = [""]
self.email = ""
self.secondaryEmails = []
self.pendingSecondaryEmails = []
self.telephone = [""]
self.fax = [""]
self.identities = []
self.status = "Not confirmed" # The status can be 'activated', 'disabled' or 'Not confirmed'
from MaKaC.common import utils
self.key = utils.newKey() #key to activate the account
self.registrants = {}
self.apiKey = None
minfo = info.HelperMaKaCInfo.getMaKaCInfoInstance()
self._lang = minfo.getLang()
self._mergeTo = None
self._mergeFrom = []
#################################
#Fermi timezone awareness #
#################################
self.timezone = ""
self.displayTZMode = ""
#################################
#Fermi timezone awareness(end) #
#################################
self.resetLinkedTo()
self.personalInfo = PersonalInfo()
self.unlockedFields = [] # fields that are not synchronized with auth backends
self.authenticatorPersonalData = {} # personal data from authenticator
if userData is not None:
if 'name' in userData:
self.setName(userData["name"])
if 'surName' in userData:
self.setSurName(userData["surName"])
if 'title' in userData:
self.setTitle(userData["title"])
if 'organisation' in userData:
if len(userData["organisation"])>0:
for org in userData["organisation"]:
if not self.getOrganisation():
self.setOrganisation(org)
else:
self.addOrganisation(org)
if 'address' in userData:
if len(userData["address"])>0:
for addr in userData["address"]:
self.addAddress(addr)
if 'email' in userData:
if type(userData["email"]) == str:
self.setEmail(userData["email"])
elif len(userData["email"])>0:
for em in userData["email"]:
self.setEmail(em)
if 'telephone' in userData:
if len(userData["telephone"])>0:
for tel in userData["telephone"]:
self.addTelephone(tel)
if 'fax' in userData:
if len(userData["fax"])>0:
for fax in userData["fax"]:
                        self.addFax(fax)
############################
#Fermi timezone awareness #
############################
if 'timezone' in userData:
self.setTimezone(userData["timezone"])
else:
self.setTimezone(info.HelperMaKaCInfo.getMaKaCInfoInstance().getTimezone())
self.setDisplayTZMode(userData.get("displayTZMode", "Event Timezone"))
def __repr__(self):
return '<Avatar({0}, {1})>'.format(self.getId(), self.getFullName())
def mergeTo(self, av):
if av:
av.mergeFrom(self)
if self.getMergeTo():
self._mergeTo.unmergeFrom(self)
self._mergeTo = av
def getMergeTo(self):
try:
return self._mergeTo
except:
self._mergeTo = None
return self._mergeTo
def isMerged(self):
if self.getMergeTo():
return True
return False
def mergeFrom(self, av):
if not av in self.getMergeFromList():
self._mergeFrom.append(av)
self._p_changed = 1
def unmergeFrom(self, av):
if av in self.getMergeFromList():
self._mergeFrom.remove(av)
self._p_changed = 1
def getMergeFromList(self):
try:
return self._mergeFrom
except:
self._mergeFrom = []
return self._mergeFrom
def getKey(self):
return self.key
def getAPIKey(self):
try:
return self.apiKey
except:
self.apiKey = None
return self.apiKey
def setAPIKey(self, apiKey):
self.apiKey = apiKey
def getRelatedCategories(self):
favorites = self.getLinkTo('category', 'favorite')
managed = self.getLinkTo('category', 'manager')
res = {}
for categ in union(favorites, managed):
res[(categ.getTitle(), categ.getId())] = {
'categ': categ,
'favorite': categ in favorites,
'managed': categ in managed,
'path': truncate_path(categ.getCategoryPathTitles(), 30, False)
}
return OrderedDict(sorted(res.items(), key=operator.itemgetter(0)))
def getSuggestedCategories(self):
if not redis_write_client:
return []
related = union(self.getLinkTo('category', 'favorite'), self.getLinkTo('category', 'manager'))
res = []
for id, score in suggestions.get_suggestions(self, 'category').iteritems():
categ = MaKaC.conference.CategoryManager().getById(id)
if not categ or categ.isSuggestionsDisabled() or categ in related:
continue
if any(p.isSuggestionsDisabled() for p in categ.iterParents()):
continue
aw = AccessWrapper()
aw.setUser(self)
if request:
aw.setIP(request.remote_addr)
if not categ.canAccess(aw):
continue
res.append({
'score': score,
'categ': categ,
'path': truncate_path(categ.getCategoryPathTitles(), 30, False)
})
return res
def resetLinkedTo(self):
self.linkedTo = {}
self.updateLinkedTo()
self._p_changed = 1
def getLinkedTo(self):
try:
return self.linkedTo
except AttributeError:
self.resetLinkedTo()
return self.linkedTo
def updateLinkedTo(self):
self.getLinkedTo() # Create attribute if does not exist
for field, data in self.linkedToMap.iteritems():
self.linkedTo.setdefault(field, {})
for role in data['roles']:
self.linkedTo[field].setdefault(role, OOTreeSet())
def linkTo(self, obj, role):
# to avoid issues with zombie avatars
if not AvatarHolder().hasKey(self.getId()):
return
self.updateLinkedTo()
for field, data in self.linkedToMap.iteritems():
if isinstance(obj, data['cls']):
if role not in data['roles']:
raise ValueError('role %s is not allowed for %s objects' % (role, type(obj).__name__))
self.linkedTo[field][role].add(obj)
self._p_changed = 1
if redis_write_client:
event = avatar_links.event_from_obj(obj)
if event:
avatar_links.add_link(self, event, field + '_' + role)
break
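    # Illustrative usage (hypothetical objects): avatar.linkTo(conf, 'chair')
    # files `conf` under linkedTo['conference']['chair'] and, when a Redis
    # write client is configured, also records the link for that event.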
def getLinkTo(self, field, role):
self.updateLinkedTo()
return self.linkedTo[field][role]
def unlinkTo(self, obj, role):
# to avoid issues with zombie avatars
if not AvatarHolder().hasKey(self.getId()):
return
self.updateLinkedTo()
for field, data in self.linkedToMap.iteritems():
if isinstance(obj, data['cls']):
if role not in data['roles']:
raise ValueError('role %s is not allowed for %s objects' % (role, type(obj).__name__))
if obj in self.linkedTo[field][role]:
self.linkedTo[field][role].remove(obj)
self._p_changed = 1
if redis_write_client:
event = avatar_links.event_from_obj(obj)
if event:
avatar_links.del_link(self, event, field + '_' + role)
break
def getStatus(self):
try:
return self.status
except AttributeError:
self.status = "activated"
return self.status
def setStatus(self, status):
statIdx = indexes.IndexesHolder().getById("status")
statIdx.unindexUser(self)
self.status = status
self._p_changed = 1
statIdx.indexUser(self)
def activateAccount(self, checkPending=True):
self.setStatus("activated")
if checkPending:
#----Grant rights if any
from MaKaC.common import pendingQueues
pendingQueues.PendingQueuesHolder().grantRights(self)
def disabledAccount(self):
self.setStatus("disabled")
def isActivated(self):
return self.status == "activated"
def isDisabled(self):
return self.status == "disabled"
def isNotConfirmed(self):
return self.status == "Not confirmed"
def setId(self, id):
self.id = str(id)
def getId(self):
return self.id
def setPersonId(self, personId):
self.personId = personId
def getPersonId(self):
return getattr(self, 'personId', None)
def setName(self, name, reindex=False):
if reindex:
idx = indexes.IndexesHolder().getById('name')
idx.unindexUser(self)
self.name = name
idx.indexUser(self)
else:
self.name = name
self._p_changed = 1
def getName(self):
return self.name
getFirstName = getName
setFirstName = setName
def setSurName(self, name, reindex=False):
if reindex:
idx = indexes.IndexesHolder().getById('surName')
idx.unindexUser(self)
self.surName = name
idx.indexUser(self)
else:
self.surName = name
def getSurName(self):
return self.surName
def getFamilyName(self):
return self.surName
def getFullName(self):
surName = ""
if self.getSurName() != "":
# accented letter capitalization requires all these encodes/decodes
surName = "%s, " % self.getSurName().decode('utf-8').upper().encode('utf-8')
return "%s%s"%(surName, self.getName())
def getStraightFullName(self, upper = True):
return ("%s %s"%(self.getFirstName(), self.getFamilyName().upper() if upper else self.getFamilyName())).strip()
getDirectFullNameNoTitle = getStraightFullName
def getAbrName(self):
res = self.getSurName()
if self.getName() != "":
if res != "":
res = "%s, "%res
res = "%s%s."%(res, self.getName()[0].upper())
return res
def getStraightAbrName(self):
name = ""
if self.getName() != "":
name = "%s. "%self.getName()[0].upper()
return "%s%s"%(name, self.getSurName())
def addOrganisation(self, newOrg, reindex=False):
if reindex:
idx = indexes.IndexesHolder().getById('organisation')
idx.unindexUser(self)
self.organisation.append(newOrg.strip())
idx.indexUser(self)
else:
self.organisation.append(newOrg.strip())
self._p_changed = 1
def setOrganisation(self, org, item=0, reindex=False):
if reindex:
idx = indexes.IndexesHolder().getById('organisation')
idx.unindexUser(self)
self.organisation[item] = org.strip()
idx.indexUser(self)
else:
self.organisation[item] = org.strip()
self._p_changed = 1
setAffiliation = setOrganisation
def getOrganisations(self):
return self.organisation
def getOrganisation(self):
return self.organisation[0]
getAffiliation = getOrganisation
def setTitle(self, title):
self.title = title
def getTitle(self):
return self.title
#################################
#Fermi timezone awareness #
#################################
def setTimezone(self,tz=None):
if not tz:
tz = info.HelperMaKaCInfo.getMaKaCInfoInstance().getTimezone()
self.timezone = tz
def getTimezone(self):
tz = info.HelperMaKaCInfo.getMaKaCInfoInstance().getTimezone()
try:
if self.timezone in all_timezones:
return self.timezone
else:
self.setTimezone(tz)
return tz
except:
self.setTimezone(tz)
return tz
def setDisplayTZMode(self,display_tz='Event Timezone'):
self.displayTZMode = display_tz
def getDisplayTZMode(self):
return self.displayTZMode
#################################
#Fermi timezone awareness(end) #
#################################
def addAddress(self, newAddress):
self.address.append(newAddress)
self._p_changed = 1
def getAddresses(self):
return self.address
def getAddress(self):
return self.address[0]
def setAddress(self, address, item=0):
self.address[item] = address
self._p_changed = 1
def setEmail(self, email, reindex=False):
if reindex:
idx = indexes.IndexesHolder().getById('email')
idx.unindexUser(self)
self.email = email.strip().lower()
idx.indexUser(self)
else:
self.email = email.strip().lower()
def getEmails(self):
return [self.email] + self.getSecondaryEmails()
def getEmail(self):
return self.email
def getSecondaryEmails(self):
try:
return self.secondaryEmails
except:
self.secondaryEmails = []
return self.secondaryEmails
def addSecondaryEmail(self, email):
email = email.strip().lower()
if not email in self.getSecondaryEmails():
self.secondaryEmails.append(email)
self._p_changed = 1
def removeSecondaryEmail(self, email):
email = email.strip().lower()
if email in self.getSecondaryEmails():
self.secondaryEmails.remove(email)
self._p_changed = 1
def setSecondaryEmails(self, emailList, reindex=False):
emailList = map(lambda email: email.lower().strip(), emailList)
if reindex:
idx = indexes.IndexesHolder().getById('email')
idx.unindexUser(self)
self.secondaryEmails = emailList
idx.indexUser(self)
else:
self.secondaryEmails = emailList
def hasEmail(self, email):
l = [self.email] + self.getSecondaryEmails()
return email.lower().strip() in l
def hasSecondaryEmail(self, email):
return email.lower().strip() in self.getSecondaryEmails()
def getPendingSecondaryEmails(self):
try:
return self.pendingSecondaryEmails
except:
self.pendingSecondaryEmails = []
return self.pendingSecondaryEmails
def addPendingSecondaryEmail(self, email):
email = email.lower().strip()
if not email in self.getPendingSecondaryEmails(): # create attribute if not exist
self.pendingSecondaryEmails.append(email)
self._p_changed = 1
def removePendingSecondaryEmail(self, email):
email = email.lower().strip()
if email in self.getPendingSecondaryEmails(): # create attribute if not exist
self.pendingSecondaryEmails.remove(email)
self._p_changed = 1
def setPendingSecondaryEmails(self, emailList):
self.pendingSecondaryEmails = emailList
def addTelephone(self, newTel):
self.telephone.append(newTel)
self._p_changed = 1
def getTelephone(self):
return self.telephone[0]
getPhone = getTelephone
def setTelephone(self, tel, item=0):
self.telephone[item] = tel
self._p_changed = 1
setPhone = setTelephone
def getTelephones(self):
return self.telephone
def getSecondaryTelephones(self):
return self.telephone[1:]
def addFax(self, newFax):
self.fax.append(newFax)
self._p_changed = 1
def setFax(self, fax, item=0):
self.fax[item] = fax
self._p_changed = 1
def getFax(self):
return self.fax[0]
def getFaxes(self):
return self.fax
def addIdentity(self, newId):
""" Adds a new identity to this Avatar.
:param newId: a new PIdentity or inheriting object
:type newId: PIdentity
"""
if newId != None and (newId not in self.identities):
self.identities.append(newId)
self._p_changed = 1
def removeIdentity(self, Id):
""" Removed an identity from this Avatar.
:param newId: a PIdentity or inheriting object
:type newId: PIdentity
"""
if Id in self.identities:
self.identities.remove(Id)
self._p_changed = 1
def getIdentityList(self, create_identities=False):
""" Returns a list of identities for this Avatar.
Each identity will be a PIdentity or inheriting object
"""
if create_identities:
for authenticator in AuthenticatorMgr().getList():
identities = self.getIdentityByAuthenticatorId(authenticator.getId())
for identity in identities:
self.addIdentity(identity)
return self.identities
def getIdentityByAuthenticatorId(self, authenticatorId):
""" Return a list of PIdentity objects given an authenticator name
:param authenticatorId: the id of an authenticator, e.g. 'Local', 'LDAP', etc
:type authenticatorId: str
"""
result = []
for identity in self.identities:
if identity.getAuthenticatorTag() == authenticatorId:
result.append(identity)
if not result:
identity = AuthenticatorMgr().getById(authenticatorId).fetchIdentity(self)
if identity:
result.append(identity)
return result
def getIdentityById(self, id, tag):
""" Returns a PIdentity object given an authenticator name and the identity's login
:param id: the login string for this identity
:type id: str
:param tag: the name of an authenticator, e.g. 'Local', 'LDAP', etc
:type tag: str
"""
for Id in self.identities:
if Id.getAuthenticatorTag() == tag and Id.getLogin() == id:
return Id
return None
def addRegistrant(self, n):
if n != None and (n.getConference().getId() not in self.getRegistrants().keys()):
self.getRegistrants()[ n.getConference().getId() ] = n
self._p_changed = 1
def removeRegistrant(self, r):
if self.getRegistrants().has_key(r.getConference().getId()):
# unlink registrant from user
self.unlinkTo(r,'registrant')
del self.getRegistrants()[r.getConference().getId()]
self._p_changed = 1
def getRegistrantList(self):
return self.getRegistrants().values()
def getRegistrants(self):
try:
if self.registrants:
pass
except AttributeError, e:
self.registrants = {}
self._p_changed = 1
return self.registrants
def getRegistrantById(self, confId):
if self.getRegistrants().has_key(confId):
return self.getRegistrants()[confId]
return None
def isRegisteredInConf(self, conf):
if conf.getId() in self.getRegistrants().keys():
return True
for email in self.getEmails():
registrant = conf.getRegistrantsByEmail(email)
if registrant:
self.addRegistrant(registrant)
registrant.setAvatar(self)
return True
return False
def hasSubmittedEvaluation(self, evaluation):
for submission in evaluation.getSubmissions():
if submission.getSubmitter()==self:
return True
return False
def containsUser(self, avatar):
return avatar == self
containsMember = containsUser
def canModify(self, aw):
return self.canUserModify(aw.getUser())
def canUserModify(self, user):
return user == self or (user in AdminList.getInstance().getList())
def getLocator(self):
d = Locator()
d["userId"] = self.getId()
return d
def delete(self):
TrashCanManager().add(self)
def recover(self):
TrashCanManager().remove(self)
# Room booking related
def isMemberOfSimbaList(self, simbaListName):
# Try to get the result from the cache
try:
if simbaListName in self._v_isMember.keys():
return self._v_isMember[simbaListName]
except:
self._v_isMember = {}
groups = []
try:
# try to get the exact match first, which is what we expect since
# there shouldn't be uppercase letters
groups.append(GroupHolder().getById(simbaListName))
except KeyError:
groups = GroupHolder().match({ 'name': simbaListName }, searchInAuthenticators = False, exact=True)
if not groups:
groups = GroupHolder().match({ 'name': simbaListName }, exact=True)
if groups:
result = groups[0].containsUser(self)
self._v_isMember[simbaListName] = result
return result
self._v_isMember[simbaListName] = False
return False
def isAdmin(self):
"""
Convenience method for checking whether this user is an admin.
Returns bool.
"""
al = AdminList.getInstance()
if al.isAdmin(self):
return True
return False
def isRBAdmin(self):
"""
Convenience method for checking whether this user is an admin for the RB module.
Returns bool.
"""
if self.isAdmin():
return True
for entity in PluginsHolder().getPluginType('RoomBooking').getOption('Managers').getValue():
if (isinstance(entity, Group) and entity.containsUser(self)) or \
(isinstance(entity, Avatar) and entity == self):
return True
return False
def getRooms(self):
"""
Returns list of rooms (RoomBase derived objects) this
user is responsible for.
"""
from MaKaC.plugins.RoomBooking.default.room import Room
from MaKaC.rb_location import RoomGUID
rooms = Room.getUserRooms(self)
roomList = [ RoomGUID.parse(str(rg)).getRoom() for rg in rooms ] if rooms else []
return [room for room in roomList if room and room.isActive]
def getReservations(self):
"""
Returns list of ALL reservations (ReservationBase
derived objects) this user has ever made.
"""
# self._ensureRoomAndResv()
# resvs = [guid.getReservation() for guid in self.resvGuids]
# return resvs
from MaKaC.rb_location import CrossLocationQueries
from MaKaC.rb_reservation import ReservationBase
resvEx = ReservationBase()
resvEx.createdBy = str(self.id)
resvEx.isCancelled = None
resvEx.isRejected = None
resvEx.isArchival = None
myResvs = CrossLocationQueries.getReservations(resvExample = resvEx)
return myResvs
def getReservationsOfMyRooms(self):
"""
Returns list of ALL reservations (ReservationBase
derived objects) this user has ever made.
"""
# self._ensureRoomAndResv()
# resvs = [guid.getReservation() for guid in self.resvGuids]
# return resvs
from MaKaC.rb_location import CrossLocationQueries
from MaKaC.rb_reservation import ReservationBase
myRooms = self.getRooms() # Just to speed up
resvEx = ReservationBase()
resvEx.isCancelled = None
resvEx.isRejected = None
resvEx.isArchival = None
myResvs = CrossLocationQueries.getReservations(resvExample = resvEx, rooms = myRooms)
return myResvs
def getPersonalInfo(self):
try:
return self.personalInfo
except:
self.personalInfo = PersonalInfo()
return self.personalInfo
def isFieldSynced(self, field):
if not hasattr(self, 'unlockedFields'):
self.unlockedFields = []
return field not in self.unlockedFields
def setFieldSynced(self, field, synced):
# check if the sync state is the same. also creates the list if it's missing
if synced == self.isFieldSynced(field):
pass
elif synced:
self.unlockedFields.remove(field)
self._p_changed = 1
else:
self.unlockedFields.append(field)
self._p_changed = 1
def getNotSyncedFields(self):
if not hasattr(self, 'unlockedFields'):
self.unlockedFields = []
return self.unlockedFields
def setAuthenticatorPersonalData(self, field, value):
fields = {'phone': {'get': self.getPhone,
'set': self.setPhone},
'fax': {'get': self.getFax,
'set': self.setFax},
'address': {'get': self.getAddress,
'set': self.setAddress},
'surName': {'get': self.getSurName,
'set': lambda x: self.setSurName(x, reindex=True)},
'firstName': {'get': self.getFirstName,
'set': lambda x: self.setFirstName(x, reindex=True)},
'affiliation': {'get': self.getAffiliation,
'set': lambda x: self.setAffiliation(x, reindex=True)},
'email': {'get': self.getEmail,
'set': lambda x: self.setEmail(x, reindex=True)}}
if not hasattr(self, 'authenticatorPersonalData'):
self.authenticatorPersonalData = {}
self.authenticatorPersonalData[field] = value or ''
field_accessors = fields[field]
if value and value != field_accessors['get']() and self.isFieldSynced(field):
field_accessors['set'](value)
self._p_changed = 1
def getAuthenticatorPersonalData(self, field):
if not hasattr(self, 'authenticatorPersonalData'):
self.authenticatorPersonalData = {}
return self.authenticatorPersonalData.get(field)
def clearAuthenticatorPersonalData(self):
self.authenticatorPersonalData = {}
def getLang(self):
try:
return self._lang
except:
minfo = info.HelperMaKaCInfo.getMaKaCInfoInstance()
self._lang = minfo.getLang()
return self._lang
def setLang(self, lang):
self._lang =lang
class AvatarHolder(ObjectHolder):
"""Specialised ObjectHolder dealing with user (avatar) objects. Objects of
this class represent an access point to Avatars of the application and
        provide different methods for accessing and retrieving them in several
ways.
"""
idxName = "avatars"
counterName = "PRINCIPAL"
_indexes = [ "email", "name", "surName","organisation", "status" ]
def matchFirstLetter(self, index, letter, onlyActivated=True, searchInAuthenticators=True):
result = {}
if index not in self._indexes:
return None
if index in ["name", "surName", "organisation"]:
match = indexes.IndexesHolder().getById(index).matchFirstLetter(letter, accent_sensitive=False)
else:
match = indexes.IndexesHolder().getById(index).matchFirstLetter(letter)
if match is not None:
for userid in match:
if self.getById(userid) not in result:
av = self.getById(userid)
if not onlyActivated or av.isActivated():
result[av.getEmail()] = av
if searchInAuthenticators:
for authenticator in AuthenticatorMgr().getList():
matches = authenticator.matchUserFirstLetter(index, letter)
if matches:
for email, record in matches.iteritems():
emailResultList = [av.getEmails() for av in result.values()]
if email not in emailResultList:
userMatched = self.match({'email': email}, exact=1, searchInAuthenticators=False)
if not userMatched:
av = Avatar(record)
av.setId(record["id"])
av.status = record["status"]
result[email] = av
else:
av = userMatched[0]
result[av.getEmail()] = av
return result.values()
def match(self, criteria, exact=0, onlyActivated=True, searchInAuthenticators=True):
result = {}
iset = set()
for f, v in criteria.items():
v = str(v).strip()
if v and f in self._indexes:
match = indexes.IndexesHolder().getById(f).matchUser(v, exact=exact, accent_sensitive=False)
if match is not None:
if len(iset) == 0:
iset = set(match)
else:
iset = iset & set(match)
for userid in iset:
av=self.getById(userid)
if not onlyActivated or av.isActivated():
result[av.getEmail()]=av
if searchInAuthenticators:
for authenticator in AuthenticatorMgr().getList():
matches = authenticator.matchUser(criteria, exact=exact)
if matches:
for email, record in matches.iteritems():
emailResultList = [av.getEmails() for av in result.values()]
if not email in emailResultList:
userMatched = self.match({'email': email}, exact=1, searchInAuthenticators=False)
if not userMatched:
av = Avatar(record)
av.setId(record["id"])
av.status = record["status"]
if self._userMatchCriteria(av, criteria, exact):
result[email] = av
else:
av = userMatched[0]
if self._userMatchCriteria(av, criteria, exact):
result[av.getEmail()] = av
return result.values()
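    # Typical lookup (hypothetical address):
    #   AvatarHolder().match({'email': 'jdoe@example.com'}, exact=1,
    #                        searchInAuthenticators=False)
    # returns a (possibly empty) list of matching Avatar objects.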
def _userMatchCriteria(self, av, criteria, exact):
if criteria.has_key("organisation"):
if criteria["organisation"]:
lMatch = False
for org in av.getOrganisations():
if exact:
if criteria["organisation"].lower() == org.lower():
lMatch = True
else:
if criteria["organisation"].lower() in org.lower():
lMatch = True
if not lMatch:
return False
if criteria.has_key("surName"):
if criteria["surName"]:
if exact:
if not criteria["surName"].lower() == av.getSurName().lower():
return False
else:
if not criteria["surName"].lower() in av.getSurName().lower():
return False
if criteria.has_key("name"):
if criteria["name"]:
if exact:
if not criteria["name"].lower() == av.getName().lower():
return False
else:
if not criteria["name"].lower() in av.getName().lower():
return False
if criteria.has_key("email"):
if criteria["email"]:
lMatch = False
for email in av.getEmails():
if exact:
if criteria["email"].lower() == email.lower():
lMatch = True
else:
if criteria["email"].lower() in email.lower():
lMatch = True
if not lMatch:
return False
return True
def getById(self, id):
try:
return ObjectHolder.getById(self, id)
except:
pass
try:
authId, extId, email = id.split(":")
except:
return None
av = self.match({"email": email}, searchInAuthenticators=False)
if av:
return av[0]
user_data = AuthenticatorMgr().getById(authId).searchUserById(extId)
av = Avatar(user_data)
identity = user_data["identity"](user_data["login"], av)
user_data["authenticator"].add(identity)
av.activateAccount()
self.add(av)
return av
def add(self,av):
"""
Before adding the user, check if the email address isn't used
"""
if av.getEmail() is None or av.getEmail()=="":
raise UserError(_("User not created. You must enter an email address"))
emailmatch = self.match({'email': av.getEmail()}, exact=1, searchInAuthenticators=False)
if emailmatch != None and len(emailmatch) > 0 and emailmatch[0] != '':
raise UserError(_("User not created. The email address %s is already used.")% av.getEmail())
id = ObjectHolder.add(self,av)
for i in self._indexes:
indexes.IndexesHolder().getById(i).indexUser(av)
return id
def mergeAvatar(self, prin, merged):
#replace merged by prin in all object where merged is
links = merged.getLinkedTo()
for objType in links.keys():
if objType == "category":
for role in links[objType].keys():
for cat in set(links[objType][role]):
# if the category has been deleted
if cat.getOwner() == None and cat.getId() != '0':
Logger.get('user.merge').warning(
"Trying to remove %s from %s (%s) but it seems to have been deleted" % \
(cat, prin.getId(), role))
continue
elif role == "creator":
cat.revokeConferenceCreation(merged)
cat.grantConferenceCreation(prin)
elif role == "manager":
cat.revokeModification(merged)
cat.grantModification(prin)
elif role == "access":
cat.revokeAccess(merged)
cat.grantAccess(prin)
elif role == "favorite":
merged.unlinkTo(cat, 'favorite')
prin.linkTo(cat, 'favorite')
elif objType == "conference":
confHolderIdx = MaKaC.conference.ConferenceHolder()._getIdx()
for role in links[objType].keys():
for conf in set(links[objType][role]):
# if the conference has been deleted
if conf.getId() not in confHolderIdx:
Logger.get('user.merge').warning(
"Trying to remove %s from %s (%s) but it seems to have been deleted" % \
(conf, prin.getId(), role))
continue
elif role == "creator":
conf.setCreator(prin)
elif role == "chair":
conf.removeChair(merged)
conf.addChair(prin)
elif role == "manager":
conf.revokeModification(merged)
conf.grantModification(prin)
elif role == "access":
conf.revokeAccess(merged)
conf.grantAccess(prin)
elif role == "abstractSubmitter":
conf.removeAuthorizedSubmitter(merged)
conf.addAuthorizedSubmitter(prin)
if objType == "session":
for role in links[objType].keys():
for ses in set(links[objType][role]):
owner = ses.getOwner()
# tricky, as conference containing it may have been deleted
if owner == None or owner.getOwner() == None:
Logger.get('user.merge').warning(
"Trying to remove %s from %s (%s) but it seems to have been deleted" % \
(ses, prin.getId(), role))
elif role == "manager":
ses.revokeModification(merged)
ses.grantModification(prin)
elif role == "access":
ses.revokeAccess(merged)
ses.grantAccess(prin)
elif role == "coordinator":
ses.removeCoordinator(merged)
ses.addCoordinator(prin)
if objType == "contribution":
for role in links[objType].keys():
for contrib in set(links[objType][role]):
if contrib.getOwner() == None:
Logger.get('user.merge').warning(
"Trying to remove %s from %s (%s) but it seems to have been deleted" % \
(contrib, prin.getId(), role))
elif role == "manager":
contrib.revokeModification(merged)
contrib.grantModification(prin)
elif role == "access":
contrib.revokeAccess(merged)
contrib.grantAccess(prin)
elif role == "submission":
contrib.revokeSubmission(merged)
contrib.grantSubmission(prin)
if objType == "track":
for role in links[objType].keys():
if role == "coordinator":
for track in set(links[objType][role]):
track.removeCoordinator(merged)
track.addCoordinator(prin)
if objType == "material":
for role in links[objType].keys():
if role == "access":
for mat in set(links[objType][role]):
mat.revokeAccess(merged)
mat.grantAccess(prin)
if objType == "file":
for role in links[objType].keys():
if role == "access":
for mat in set(links[objType][role]):
mat.revokeAccess(merged)
mat.grantAccess(prin)
if objType == "abstract":
for role in links[objType].keys():
if role == "submitter":
for abstract in set(links[objType][role]):
abstract.setSubmitter(prin)
if objType == "registration":
for role in links[objType].keys():
if role == "registrant":
for reg in set(links[objType][role]):
reg.setAvatar(prin)
prin.addRegistrant(reg)
if objType == "alarm":
for role in links[objType].keys():
if role == "to":
for alarm in set(links[objType][role]):
alarm.removeToUser(merged)
alarm.addToUser(prin)
if objType == "group":
for role in links[objType].keys():
if role == "member":
for group in set(links[objType][role]):
group.removeMember(merged)
group.addMember(prin)
if objType == "evaluation":
for role in links[objType].keys():
if role == "submitter":
for submission in set(links[objType][role]):
if len([s for s in submission.getEvaluation().getSubmissions() if s.getSubmitter()==prin]) >0 :
                                #prin has also answered the same evaluation as the merged user.
submission.setSubmitter(None)
else:
                                #prin didn't answer the same evaluation as the merged user.
submission.setSubmitter(prin)
# Merge avatars in redis
if redis_write_client:
avatar_links.merge_avatars(prin, merged)
suggestions.merge_avatars(prin, merged)
# remove merged from holder
self.remove(merged)
idxs = indexes.IndexesHolder()
org = idxs.getById('organisation')
email = idxs.getById('email')
name = idxs.getById('name')
surName = idxs.getById('surName')
status_index = idxs.getById('status')
org.unindexUser(merged)
email.unindexUser(merged)
name.unindexUser(merged)
surName.unindexUser(merged)
status_index.unindexUser(merged)
# add merged email and logins to prin and merge users
for mail in merged.getEmails():
prin.addSecondaryEmail(mail)
for id in merged.getIdentityList(create_identities=True):
id.setUser(prin)
prin.addIdentity(id)
merged.mergeTo(prin)
# reindex prin email
email.unindexUser(prin)
email.indexUser(prin)
def unmergeAvatar(self, prin, merged):
if not merged in prin.getMergeFromList():
return False
merged.mergeTo(None)
idxs = indexes.IndexesHolder()
org = idxs.getById('organisation')
email = idxs.getById('email')
name = idxs.getById('name')
surName = idxs.getById('surName')
email.unindexUser(prin)
for mail in merged.getEmails():
prin.removeSecondaryEmail(mail)
for id in merged.getIdentityList(create_identities=True):
prin.removeIdentity(id)
id.setUser(merged)
self.add(merged)
org.indexUser(merged)
email.indexUser(merged)
name.indexUser(merged)
surName.indexUser(merged)
email.indexUser(prin)
return True
# ToDo: This class should ideally derive from TreeHolder as it is thought to
# be a index over the "Principal" objects i.e. it will be a top indexing of
# the contents of AvatarHolder and GroupHolder. This will allow to
# transparently access to Principal objects from its id. To transparently
# index all the objects AvatarHolder and GroupHolder must override the
# "add" method and, apart from their normal operation, include an adding call
# for the PrincipalHolder.
# The problem is that I have experienced some troubles (it seems not to perform
# the adding of objects) while adding an object both to the AvatarHolder and
# to this one; so, for the time being, I will implement it in a "dirty" and
# non-optimal way to be able to continue working, but the trouble must be
# investigated and a better solution found.
# I'll keep the ObjectHolder interface so it will be easier afterwards to
# implement a more optimised solution (just this object needs to be modified)
class PrincipalHolder:
def __init__(self):
self.__gh = GroupHolder()
self.__ah = AvatarHolder()
def getById(self, id):
try:
prin = self.__gh.getById(id)
return prin
except KeyError, e:
pass
prin = self.__ah.getById(id)
return prin
def match(self, element_id, exact=1, searchInAuthenticators=True):
prin = self.__gh.match({"name": element_id}, searchInAuthenticators=searchInAuthenticators, exact=exact)
if not prin:
prin = self.__ah.match({"login": element_id}, searchInAuthenticators=searchInAuthenticators, exact=exact)
return prin
class LoginInfo:
def __init__(self, login, password):
self.setLogin(login)
self.setPassword(password)
def setLogin(self, newLogin):
self.login = newLogin.strip()
def getLogin(self):
return self.login
def setPassword(self, newPassword):
self.password = newPassword
def getPassword(self):
return self.password
class PersonalInfo(Persistent, Fossilizable):
fossilizes(IPersonalInfoFossil)
def __init__(self):
self._basket = PersonalBasket()
self._showPastEvents = False #determines if past events in category overview will be shown
self._p_changed = 1
def getShowPastEvents(self):
if not hasattr(self, "_showPastEvents"):
self._showPastEvents = False
return self._showPastEvents
def setShowPastEvents(self, value):
self._showPastEvents = value
def getBasket(self):
return self._basket
class PersonalBasket(Persistent):
# Generic basket, for Events, Categories, Avatars, Groups and Rooms
def __init__(self):
self._events = {}
self._categories = {}
self._rooms = {}
self._users = {}
self._userGroups = {}
self._p_changed = 1
def __findDict(self, element):
if (type(element) == MaKaC.conference.Conference):
return self._events
elif (type(element) == MaKaC.conference.Category):
return self._categories
elif (type(element) == Avatar):
return self._users
elif (type(element) == Group):
return self._userGroups
elif (type(element) == MaKaC.rb_location.RoomGUID):
return self._rooms
else:
raise Exception(_("Unknown Element Type"))
def addElement(self, element):
basket = self.__findDict(element)
if element.getId() not in basket:
basket[element.getId()] = element
self._p_changed = 1
return True
return False
def deleteElement(self, element=None):
res = self.__findDict(element).pop(element.getId(), None)
        if res is None:
return False
self._p_changed = 1
return True
def deleteUser(self, user_id):
res = self._users.pop(user_id, None)
self._p_changed = 1
return res is not None
def hasElement(self, element):
return element.getId() in self.__findDict(element)
def hasUserId(self, id):
        return id in self._users
def getUsers(self):
return self._users
| gpl-3.0 | -3,996,122,456,246,659,600 | 34.671633 | 123 | 0.549794 | false | 4.354503 | false | false | false |
eomahony/Numberjack | examples/Tsccd.py | 1 | 3117 | from Numberjack import *
def get_model(k, v, n):
design = Matrix(v, n)
pairs = Matrix(v * (v - 1) // 2, n)
index = [[0 for i in range(v)] for j in range(v)]
a = 0
for i in range(v - 1):
for j in range(i + 1, v):
index[i][j] = a
index[j][i] = a
a += 1
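    # Added note (not in the original example): "index" assigns each unordered
    # pair {i, j} of the v elements a unique row of the "pairs" matrix; for
    # v = 4 the pairs (0,1), (0,2), (0,3), (1,2), (1,3), (2,3) map to rows 0-5.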
pair_occurrence = VarArray(v * (v - 1) // 2, 1, v - k)
first = VarArray(v * (v - 1) // 2, n)
last = VarArray(v * (v - 1) // 2, n)
model = Model(
## each block is a k-tuple
[Sum(col) == k for col in design.col],
## exactly one change between each block
[Sum([design[i][j - 1] > design[i][j] for i in range(v)]) == 1 for j in range(1, n)],
[Sum([design[i][j - 1] < design[i][j] for i in range(v)]) == 1 for j in range(1, n)],
## each pair can occur between 1 and v-k times
[pairs[index[i][j]][x] == (design[i][x] & design[j][x]) for i in range(v) for j in range(i) for x in range(n)],
        [pair_occurrence[index[i][j]] == Sum(pairs[index[i][j]]) for i in range(v) for j in range(i)],
## consecutive ones (convex rows)
[pairs[index[i][j]][x] <= (first[index[i][j]] <= x) for i in range(v) for j in range(i) for x in range(n)],
[pairs[index[i][j]][x] <= (last[index[i][j]] >= x) for i in range(v) for j in range(i) for x in range(n)],
[((first[index[i][j]] <= x) & (x <= last[index[i][j]])) <= pairs[index[i][j]][x] for x in range(n) for i in range(v) for j in range(i)],
[first[index[i][j]] <= last[index[i][j]] for i in range(v) for j in range(i)],
        # implied constraint (we know the number of pairs in each column)
[Sum(col) == (k*(k-1)//2) for col in pairs.col],
## symmetry breaking
[design[i][0] == 1 for i in range(k)],
design[k-1][1] == 0,
design[k][1] == 1,
)
return first, pairs, last, design, index, model
def solve(param):
k = param['k']
v = param['v']
n = (v * (v - 1) // 2 - k * (k - 1) // 2) // (k - 1) + 1
first, pairs, last, design, index, model = get_model(k, v, n)
solver = model.load(param['solver'])
solver.setHeuristic('DomainOverWDegree', 'Random', 1)
solver.setVerbosity(param['verbose'])
solver.setTimeLimit(param['tcutoff'])
solver.solve()
out = ''
if solver.is_sat():
out += str(design)+'\n'
for i in range(v-1):
for j in range(i + 1, v):
out += str((i, j)).ljust(5) + ' ' + str(first[index[i][j]]) + ' ' + str(pairs[index[i][j]]) + ' ' + str(last[index[i][j]]) + '\n'
the_design = [[] for y in range(n)]
for y in range(n):
for x in range(v):
if design[x][y].get_value() > 0:
the_design[y].append(x)
for x in range(k):
for y in range(n):
out += (str(the_design[y][x]+1).rjust(2) + ' ')
out += '\n'
out += '\nNodes: ' + str(solver.getNodes())
return out
default = {'k': 3, 'v': 6, 'solver': 'MiniSat', 'verbose': 0, 'tcutoff': 30}
if __name__ == '__main__':
param = input(default)
print(solve(param))
| lgpl-2.1 | 2,552,755,813,298,700,300 | 32.880435 | 145 | 0.494386 | false | 2.910364 | false | false | false |
Azure/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2019_06_01/aio/_configuration.py | 1 | 3182 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class PolicyClientConfiguration(Configuration):
"""Configuration for PolicyClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(PolicyClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2019-06-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| mit | -3,418,886,602,235,532,300 | 46.492537 | 134 | 0.678504 | false | 4.526316 | true | false | false |
pdear/verilib | pytools/vlparse/types/primary.py | 1 | 5043 | #
# primary.py - Primary verilog syntax tree types
#
# Verilib - A Verilog HDL development framework
# Copyright (c) 2014, Patrick Dear, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
#
import re
from ..tokens import Tokens
from ..errors import print_error, VlSyntaxError
class SyntaxNode:
""" A generic syntactical node in our parse tree """
def __init__(self):
self.children = []
@staticmethod
    def parse(tkns):
raise NotImplementedError()
def to_vl_string(self):
""" Transform back into a verilog string """
raise NotImplementedError("to_vl_string not implemented for type " +
str(self.__class__))
class NetType(SyntaxNode):
def __init__(self, nettype=""):
SyntaxNode.__init__(self)
self.nettype = nettype
@staticmethod
def parse(tkns):
""" Parse a net type. """
nettypes = (Tokens.KW_WIRE, Tokens.KW_SUPPLY0, Tokens.KW_SUPPLY1,
Tokens.KW_TRI, Tokens.KW_TRIAND, Tokens.KW_TRIOR, Tokens.KW_TRI0,
Tokens.KW_TRI1, Tokens.KW_WAND, Tokens.KW_WOR)
for t in nettypes:
if tkns.accept(t):
return NetType(t.name)
return None
class Number(SyntaxNode):
""" Represents any kind of verilog number """
def __init__(self, numtype=int, width=32, value=0, base=""):
SyntaxNode.__init__(self)
self.numtype = numtype
self.width = width
self.value = value
        self.base = base
def to_vl_string(self):
""" Convert back into verilog literal """
if self.numtype != int:
raise Exception("Implement me!")
if self.base == "":
return str(self.value)
raise Exception("Implement me!")
@staticmethod
def parse(tkns):
""" Parse an immediate number """
t = tkns.current()
if tkns.accept(Tokens.INTEGER):
return Number(value=int(t.text))
elif tkns.accept(Tokens.DECIMAL_INTEGER):
raise Exception("Implement me!")
elif tkns.accept(Tokens.BINARY_INTEGER):
raise Exception("Implement me!")
elif tkns.accept(Tokens.OCTAL_INTEGER):
raise Exception("Implement me!")
elif tkns.accept(Tokens.HEX_INTEGER):
raise Exception("Implement me!")
elif tkns.accept(Tokens.REAL):
raise Exception("Implement me!")
else:
return None
class Identifier(SyntaxNode):
""" Represents any kind of idendifier """
def __init__(self, identifier):
SyntaxNode.__init__(self)
self.identifier = identifier
def to_vl_string(self):
return self.identifier
@staticmethod
def parse(tkns):
""" Super easy """
t = tkns.current().text
tkns.expect(Tokens.IDENTIFIER)
return Identifier(t)
class UnaryOperator(SyntaxNode):
def __init__(self, op, child=None):
        SyntaxNode.__init__(self)
self.op = op
self.children = [child] if child != None else []
@staticmethod
def parse(tkns):
""" Parse any sort of unary operator """
unary_ops = (Tokens.OP_PLUS, Tokens.OP_MINUS, Tokens.OP_BANG,
Tokens.OP_TILDE, Tokens.OP_TILDEAND, Tokens.OP_AND,
Tokens.OP_TILDEBAR, Tokens.OP_BAR, Tokens.OP_HAT,
Tokens.OP_TILDEHAT, Tokens.OP_HATTILDE)
for op in unary_ops:
if tkns.accept(op):
return UnaryOperator(op.name)
return None
class BinaryOperator(SyntaxNode):
def __init__(self, op, children=[]):
SyntaxNode.__init__(self)
self.op = op
self.children = children
@staticmethod
def parse(tkns):
""" Parse a binary operator """
binary_ops = (Tokens.OP_PLUS, Tokens.OP_MINUS, Tokens.OP_STAR,
Tokens.OP_SLASH, Tokens.OP_PERCENT, Tokens.OP_EQEQ,
Tokens.OP_NEQ, Tokens.OP_EQEQEQ, Tokens.OP_NEQEQ,
Tokens.OP_ANDAND, Tokens.OP_BARBAR, Tokens.OP_STARSTAR,
Tokens.OP_LT, Tokens.OP_LEQ, Tokens.OP_GT, Tokens.OP_GEQ,
Tokens.OP_AND, Tokens.OP_BAR, Tokens.OP_HAT, Tokens.OP_TILDEHAT,
Tokens.OP_HATTILDE, Tokens.OP_GTGT, Tokens.OP_LTLT,
Tokens.OP_GTGTGT, Tokens.OP_LTLTLT)
for op in binary_ops:
if tkns.accept(op):
m = re.match(r"'?([^']+)'?", op.name)
return BinaryOperator(m.group(1))
return None
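# Added note (assumption about the token naming convention): the re.match call
# in BinaryOperator.parse strips optional surrounding quotes from a token name,
# so a name such as "'+'" is reported as "+".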
| lgpl-3.0 | -3,968,423,585,559,523,000 | 32.845638 | 77 | 0.611541 | false | 3.656998 | false | false | false |
felipet/fca3103_pytool | fca3103_tool.py | 1 | 5035 | #! /usr/bin/env python3
# -*- coding: utf-8 -*
'''
Terminal tool to make Time Interval measurements using the Tektronix FCA3103
@file
@date Created on Sep. 16, 2015
@author Felipe Torres (torresfelipex1<AT>gmail.com)
@copyright LGPL v2.1
'''
# ----------------------------------------------------------------------------|
# GNU LESSER GENERAL PUBLIC LICENSE |
# ------------------------------------ |
# This source file is free software; you can redistribute it and/or modify it |
# under the terms of the GNU Lesser General Public License as published by the|
# Free Software Foundation; either version 2.1 of the License, or (at your |
# option) any later version. This source is distributed in the hope that it |
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warrant |
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser |
# General Public License for more details. You should have received a copy of |
# the GNU Lesser General Public License along with this source; if not, |
# download it from http://www.gnu.org/licenses/lgpl-2.1.html |
# ----------------------------------------------------------------------------|
# -----------------------------------------------------------------------------
# Import --
# -----------------------------------------------------------------------------
import datetime
import argparse as arg
from subprocess import check_output
from FCA3103 import FCA3103
def main() :
'''
    Tool to automate control of the Tektronix FCA3103 Timer/Counter
'''
parser = arg.ArgumentParser(description='Tektronix FCA3103 tool')
parser.add_argument('--function', '-f', help='Measuring Function', choices=['mtint','tint'],\
required=True)
parser.add_argument('--interval', '-t', help='Time between samples', type=int)
parser.add_argument('--samples', '-s', help='Number of samples', type=int, \
default=1)
parser.add_argument('--debug', '-d', help="Enable debug output", action="store_true", \
default=False)
parser.add_argument('--device', '-l', help="Device port", type=int, default=1)
parser.add_argument('--output', '-o', help='Output data file', type=str)
parser.add_argument('--ref', '-r', help='Input channel for the reference',type=int, \
choices=[1,2],default=1)
parser.add_argument('--trigl','-g',help='Input trigger level', type=float, \
default=1.5)
parser.add_argument('--skip','-i',help='Ignore values far from mean plus error',type=int, \
default=0)
parser.add_argument('--tstamp','-x', help='Add timestamping for each measure',action="store_true", \
default=False)
args = parser.parse_args()
valid_port = False
ports = check_output(["""ls /dev | grep usbtmc"""],shell=True)[:-1]
for p in ports.splitlines():
p = p.decode('utf-8')
if int(p[-1]) == args.device:
valid_port = True
if not valid_port:
print("No device found at /dev/usbtmc%d" % (args.device))
exit(6) # No such device or address
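    # Added note (assumption about typical udev naming): `ls /dev | grep usbtmc`
    # lists names such as "usbtmc0", "usbtmc1"; only the last character is
    # compared with --device, so device numbers above 9 are not matched.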
device = FCA3103(args.device, args.ref, 2 if args.ref == 1 else 1)
device.show_dbg = args.debug
device.t_samples = args.interval
device.n_samples = args.samples
device.skip_values = True if args.skip > 0 else False
device.error = args.skip
    # TODO: Add the possibility of using different trigger values for the inputs
device.trig_level[0] = device.trig_level[1] = args.trigl
# try:
if args.function == 'mtint':
print("Measuring Mean Time Interval between the inputs (%d secs)..." % (args.samples))
mean = device.mean_time_interval(args.samples, args.interval)
print("Mean Time Interval for %d samples: %g" % (args.samples, mean))
elif args.function == 'tint':
print("Measuring Time Interval between the inputs (%d secs)..." % (args.samples+10))
values = device.time_interval(args.samples, tstamp=args.tstamp)
if args.output:
with open(args.output,'a+') as file:
file.write("# Time Interval Measurement (%d samples) with Tektronix FCA3103 (50ps)\n" % args.samples)
file.write("# %s\n" % datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
for v in values:
if args.tstamp:
file.write("%g\t%g\n" % (v[0], v[1]))
else:
file.write(str(v))
file.write("\n")
print("Output writed to '%s'" % (args.output))
else:
print("Time Interval Measurement (%d samples) with Tektronix FCA3103 (50ps)" % args.samples)
print("%s\n" % datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
for v in values:
print(v)
# except Exception as e:
# print(e)
if __name__ == "__main__" :
main()
| gpl-2.0 | -1,453,097,992,371,775,000 | 44.772727 | 117 | 0.562066 | false | 3.86416 | false | false | false |
qunying/gps | share/support/core/gcov.py | 1 | 7246 | """ Provides the "Tools/Gcov/Compute coverage files" and "Remove coverage
files" menus, which execute gcov automatically.
This script will also perform checks along the way to guide through the
procedure of obtaining gcov info.
The output of the gcov process is displayed in a separate console.
At the end of the processing, the open editors are decorated with coverage
information.
Note that GPS calls gcov so that the .gcov files are generated
- in the directory pointed to by the "GCOV_ROOT" environment variable, or
- in the object directory of the root project, if this variable is not set
"""
###########################################################################
# No user customization below this line
###########################################################################
import GPS
import os
import re
from gps_utils import interactive
from GPS import MDI, Project, Process, CodeAnalysis
# A class to display the output of gcov in a separate console.
class Gcov_Process (GPS.Console, GPS.Process):
def on_output(self, unmatched, matched):
self.write(unmatched + matched)
def on_exit(self, status, remaining_output):
self.write(remaining_output)
if status == 0:
self.write("process terminated successfully")
else:
self.write("process terminated [" + str(status) + "]")
# Show coverage report
analysis = CodeAnalysis.get("Coverage")
if GPS.Project.root().is_harness_project():
original = GPS.Project.root().original_project().file()
analysis.add_gcov_project_info(original)
else:
analysis.add_all_gcov_project_info()
analysis.show_analysis_report()
self.kill()
def on_input(self, input):
self.send(input)
def on_destroy(self):
self.kill()
def __init__(self, process, args="", directory=""):
GPS.Console.__init__(self, "Executing gcov",
on_input=Gcov_Process.on_input,
on_destroy=Gcov_Process.on_destroy,
force=True)
GPS.Process.__init__(self, process + ' ' + args, ".+",
remote_server="Build_Server",
directory=directory,
on_exit=Gcov_Process.on_exit,
on_match=Gcov_Process.on_output)
def using_gcov(context):
return GPS.Preference('Coverage-Toolchain').get() == 'Gcov'
@interactive(name='gcov compute coverage files',
filter=using_gcov)
def run_gcov():
"Run gcov to generate the coverage files"
# Verify that the version of gcov is recent enough to support response
# files and reading of .gc?? data in multiple directories.
try:
p = Process("gcov --version")
out = p.get_result()
p = re.compile("[1-9][0-9][0-9][0-9][0-1][0-9][0-3][0-9]")
found = p.findall(out)
if not found:
MDI.dialog("Could not find a date in the output of gcov.")
else:
date = found[0]
if date < 20071005:
MDI.dialog("Your version of gcov is dated " + str(date) +
".\nThis plugin requires gcov for GNAT dated " +
"20071005 or later.")
return
except:
MDI.dialog("""Could not read gcov version number.
Make sure you are using gcov for GNAT dated 20071005 or later.""")
# Determine the root project
root_project = Project.root()
# Determine where to create the gcov info
GCOV_ROOT = os.getenv("GCOV_ROOT")
if not GCOV_ROOT:
root_object_dirs = root_project.object_dirs(False)
if not root_object_dirs:
MDI.dialog("""The root project does not have an object directory.
Please add one, or set the environment variable GCOV_ROOT to
the directory where you would like the gcov files to be
generated.""")
return
else:
gcov_dir = root_object_dirs[0]
else:
gcov_dir = GCOV_ROOT
    if not os.access(gcov_dir, os.R_OK | os.W_OK):
MDI.dialog(""" Could not access the directory:
""" + gcov_dir + """
Please point the environment variable GCOV_ROOT to a directory
on which you have permission to read and write.
""")
input_file = os.path.abspath(os.path.join(gcov_dir, "gcov_input.txt"))
# List all the projects
projects = root_project.dependencies(True)
# List all object dirs
object_dirs = root_project.object_dirs(True)
# Write the response file
res = file(input_file, 'wb')
gcda_file_found = False
gcno_file_found = False
for p in projects:
sources = p.sources(False)
for s in sources:
n = s.path
basename = n[max(n.rfind('\\'), n.rfind('/')) + 1:len(n)]
unit = basename[0:basename.rfind('.')]
for object_dir in object_dirs:
gcda = object_dir + os.sep + unit + ".gcda"
# If we have not yet found at least one .gcno file, attempt to
# find one. This is to improve the precision of error messages,
# and detect the case where compilation was successful but the
# executable has never been run.
if not gcno_file_found:
gcno = object_dir + os.sep + unit + ".gcno"
if os.access(gcno, os.F_OK):
gcno_file_found = True
if os.access(gcda, os.F_OK):
gcda_file_found = True
# Write one entry in response file
# Escape all backslashes.
gcda = gcda.replace('\\', '\\\\')
res.write('"' + gcda + '"' + "\n")
break
res.close()
file(input_file).read()
if not gcno_file_found:
# No gcno file was found: display an appropriate message.
MDI.dialog(""" No ".gcno" file was found in any of the object directories.
Make sure you have compiled the sources of interest with
the "Code coverage" flags.""")
else:
if not gcda_file_found:
            # Some gcno files were found, but no gcda files.
MDI.dialog(""" No ".gcda" file was found in any of the object directories.
Make sure you have run the executable(s) at least once.
""")
else:
# Run gcov
Gcov_Process("gcov", "@%s" % input_file, directory=gcov_dir)
@interactive(name='gcov remove coverage files',
filter=using_gcov)
def remove_gcov():
"Cleanup the gcov coverage files"
if not MDI.yes_no_dialog(
"This will remove all .gcov and .gcda files, are you sure ?"):
return
# Look in all the projects
for p in Project.root().dependencies(True):
object_dirs = p.object_dirs(False)
if len(object_dirs) > 0:
object_dir = object_dirs[0]
# Browse in the object dirs
for f in os.listdir(object_dir):
# if f is a .gcda or a .gcov, remove it
if f.find(".gcda") != -1 or f.find(".gcov") != -1:
os.remove(object_dir + os.sep + f)
| gpl-3.0 | -8,241,168,727,739,678,000 | 31.78733 | 86 | 0.569418 | false | 3.976948 | false | false | false |
cproctor/hex | server/hexserver/hexserver/models/spell.py | 1 | 3379 | from db import db_connection
from user import _user_name_exists, _authenticate_user
import json
import time
import logging
log = logging.getLogger(__name__)
def get_spells(request):
conn = db_connection(request)
cursor = conn.cursor()
cursor.execute("SELECT * FROM spells;")
result = cursor.fetchall()
conn.close()
return result
def get_current_spells(request):
conn = db_connection(request)
cursor = conn.cursor()
currentSpells = _get_current_spells(cursor)
conn.close()
return currentSpells
def get_spell_by_time(request, castTime):
conn = db_connection(request)
cursor = conn.cursor()
spell = _get_spell_by_time(cursor, castTime)
conn.close()
return spell
def _get_spell_by_time(cursor, castTime):
cursor.execute("SELECT * FROM spells WHERE cast_time = ?", (castTime,))
return cursor.fetchone()
def _get_current_spells(cursor):
cursor.execute("SELECT * FROM spells WHERE complete = 0 ORDER BY cast_time")
current = cursor.fetchone()
upcoming = cursor.fetchall()
return {
"current": current,
"upcoming":upcoming
}
def create_spell(request, params):
conn = db_connection(request)
cursor = conn.cursor()
spellTime = int(time.time())
# We use spellTime as a primary key. So if we should happen to get two spells
# at the same second, pretend like the second came a second later.
while _get_spell_by_time(cursor, spellTime):
spellTime += 1
try:
assert(_authenticate_user(params['user_name'], params['spirit_animal'],
cursor))
assert(isinstance(params['name'], basestring))
assert(params['name'] != '')
assert(params['setup'] or params['loop'])
for component in ['setup', 'loop']:
if params[component]:
for frame in params[component]:
try:
assert(validate_frame(frame))
except:
log.debug(frame)
raise AssertionError()
    except (AssertionError, KeyError):
return False
setup = json.dumps(params['setup']) if params['setup'] else ''
loop = json.dumps(params['loop']) if params['loop'] else ''
cursor.execute('INSERT INTO spells VALUES (?,?,?,?,?,?,?)', (
params['user_name'],
params['name'],
3,
spellTime,
setup,
loop,
0
))
conn.commit()
newSpell = _get_spell_by_time(cursor, spellTime)
conn.close()
return newSpell
def mark_spell_complete(request, castTime):
conn = db_connection(request)
cursor = conn.cursor()
result = _mark_spell_complete(cursor, castTime)
conn.commit()
conn.close()
return result
def _mark_spell_complete(cursor, castTime):
cursor.execute("UPDATE spells SET complete = ? WHERE cast_time = ?", (1, castTime))
return cursor.fetchone()
def validate_frame(frame):
try:
assert isinstance(frame, list)
for layer in frame:
assert isinstance(layer, list)
assert len(layer) == 2
colors, bulbs = layer
assert len(colors) == 4
for color in colors:
assert isinstance(color, int)
for bulb in bulbs:
assert isinstance(bulb, int)
except:
return False
return True
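# Added example (hypothetical data, for illustration only): a frame that passes
# validate_frame -- one layer holding four colour components and a bulb list.
#   validate_frame([[[255, 0, 0, 10], [0, 1, 2]]])  # -> True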
| mit | -1,238,376,574,511,584,300 | 29.441441 | 87 | 0.600474 | false | 3.897347 | false | false | false |
great-expectations/great_expectations | tests/datasource/test_sqlalchemy_datasource.py | 1 | 10417 | import os
from unittest import mock
import pandas as pd
import pytest
from ruamel.yaml import YAML
import great_expectations.dataset.sqlalchemy_dataset
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.dataset import SqlAlchemyDataset
from great_expectations.datasource import SqlAlchemyDatasource
from great_expectations.validator.validator import BridgeValidator, Validator
try:
sqlalchemy = pytest.importorskip("sqlalchemy")
except ImportError:
sqlalchemy = None
yaml = YAML()
def test_sqlalchemy_datasource_custom_data_asset(
data_context_parameterized_expectation_suite, test_db_connection_string
):
name = "test_sqlalchemy_datasource"
class_name = "SqlAlchemyDatasource"
data_asset_type_config = {
"module_name": "custom_sqlalchemy_dataset",
"class_name": "CustomSqlAlchemyDataset",
}
data_context_parameterized_expectation_suite.add_datasource(
name,
class_name=class_name,
credentials={"connection_string": test_db_connection_string},
data_asset_type=data_asset_type_config,
batch_kwargs_generators={
"default": {"class_name": "TableBatchKwargsGenerator"}
},
)
# We should now see updated configs
with open(
os.path.join(
data_context_parameterized_expectation_suite.root_directory,
"great_expectations.yml",
),
) as data_context_config_file:
data_context_file_config = yaml.load(data_context_config_file)
assert (
data_context_file_config["datasources"][name]["data_asset_type"]["module_name"]
== "custom_sqlalchemy_dataset"
)
assert (
data_context_file_config["datasources"][name]["data_asset_type"]["class_name"]
== "CustomSqlAlchemyDataset"
)
# We should be able to get a dataset of the correct type from the datasource.
data_context_parameterized_expectation_suite.create_expectation_suite("table_1.boo")
batch = data_context_parameterized_expectation_suite.get_batch(
data_context_parameterized_expectation_suite.build_batch_kwargs(
"test_sqlalchemy_datasource", "default", "table_1"
),
"table_1.boo",
)
assert type(batch).__name__ == "CustomSqlAlchemyDataset"
res = batch.expect_column_func_value_to_be("col_1", 1)
assert res.success is True
def test_standalone_sqlalchemy_datasource(test_db_connection_string, sa):
datasource = SqlAlchemyDatasource(
"SqlAlchemy",
connection_string=test_db_connection_string,
echo=False,
batch_kwargs_generators={
"default": {"class_name": "TableBatchKwargsGenerator"}
},
)
assert set(datasource.get_available_data_asset_names()["default"]["names"]) == {
("main.table_1", "table"),
("main.table_2", "table"),
}
batch_kwargs = datasource.build_batch_kwargs("default", "main.table_1")
batch = datasource.get_batch(batch_kwargs=batch_kwargs)
assert isinstance(batch, Batch)
batch_data = batch.data
assert isinstance(
batch_data,
great_expectations.dataset.sqlalchemy_dataset.SqlAlchemyBatchReference,
)
dataset = SqlAlchemyDataset(**batch.data.get_init_kwargs())
assert len(dataset.head(10)) == 5
def test_create_sqlalchemy_datasource(data_context_parameterized_expectation_suite, sa):
name = "test_sqlalchemy_datasource"
# type_ = "sqlalchemy"
class_name = "SqlAlchemyDatasource"
# Use sqlite so we don't require postgres for this test.
connection_kwargs = {"credentials": {"drivername": "sqlite"}}
# It should be possible to create a sqlalchemy source using these params without
# saving substitution variables
data_context_parameterized_expectation_suite.add_datasource(
name, class_name=class_name, **connection_kwargs
)
data_context_config = data_context_parameterized_expectation_suite.get_config()
assert name in data_context_config["datasources"]
assert data_context_config["datasources"][name]["class_name"] == class_name
# We should be able to get it in this session even without saving the config
source = data_context_parameterized_expectation_suite.get_datasource(name)
assert isinstance(source, SqlAlchemyDatasource)
var_name = "test_sqlalchemy_datasource"
data_context_parameterized_expectation_suite.save_config_variable(
var_name, connection_kwargs["credentials"]
)
# But we should be able to add a source using a substitution variable
name = "second_source"
data_context_parameterized_expectation_suite.add_datasource(
name, class_name=class_name, credentials="${" + var_name + "}"
)
data_context_config = data_context_parameterized_expectation_suite.get_config()
assert name in data_context_config["datasources"]
assert data_context_config["datasources"][name]["class_name"] == class_name
assert (
data_context_config["datasources"][name]["credentials"] == "${" + var_name + "}"
)
source = data_context_parameterized_expectation_suite.get_datasource(name)
assert isinstance(source, SqlAlchemyDatasource)
# Finally, we should be able to confirm that the folder structure is as expected
with open(
os.path.join(
data_context_parameterized_expectation_suite.root_directory,
"uncommitted/config_variables.yml",
),
) as credentials_file:
substitution_variables = yaml.load(credentials_file)
assert substitution_variables == {
var_name: dict(**connection_kwargs["credentials"])
}
def test_sqlalchemy_source_templating(sqlitedb_engine):
datasource = SqlAlchemyDatasource(
engine=sqlitedb_engine,
batch_kwargs_generators={"foo": {"class_name": "QueryBatchKwargsGenerator"}},
)
generator = datasource.get_batch_kwargs_generator("foo")
generator.add_query(data_asset_name="test", query="select 'cat' as ${col_name};")
batch = datasource.get_batch(
generator.build_batch_kwargs(
"test", query_parameters={"col_name": "animal_name"}
)
)
dataset = BridgeValidator(
batch,
expectation_suite=ExpectationSuite("test"),
expectation_engine=SqlAlchemyDataset,
).get_dataset()
res = dataset.expect_column_to_exist("animal_name")
assert res.success is True
res = dataset.expect_column_values_to_be_in_set("animal_name", ["cat"])
assert res.success is True
def test_sqlalchemy_source_limit(sqlitedb_engine):
df1 = pd.DataFrame({"col_1": [1, 2, 3, 4, 5], "col_2": ["a", "b", "c", "d", "e"]})
df2 = pd.DataFrame({"col_1": [0, 1, 2, 3, 4], "col_2": ["b", "c", "d", "e", "f"]})
df1.to_sql(name="table_1", con=sqlitedb_engine, index=True)
df2.to_sql(name="table_2", con=sqlitedb_engine, index=True, schema="main")
datasource = SqlAlchemyDatasource("SqlAlchemy", engine=sqlitedb_engine)
limited_batch = datasource.get_batch({"table": "table_1", "limit": 1, "offset": 2})
assert isinstance(limited_batch, Batch)
limited_dataset = BridgeValidator(
limited_batch,
expectation_suite=ExpectationSuite("test"),
expectation_engine=SqlAlchemyDataset,
).get_dataset()
assert limited_dataset._table.name.startswith(
"ge_tmp_"
) # we have generated a temporary table
assert len(limited_dataset.head(10)) == 1 # and it is only one row long
assert limited_dataset.head(10)["col_1"][0] == 3 # offset should have been applied
def test_sqlalchemy_datasource_query_and_table_handling(sqlitedb_engine):
# MANUALLY SET DIALECT NAME FOR TEST
datasource = SqlAlchemyDatasource("SqlAlchemy", engine=sqlitedb_engine)
with mock.patch(
"great_expectations.dataset.sqlalchemy_dataset.SqlAlchemyBatchReference.__init__",
return_value=None,
) as mock_batch:
datasource.get_batch({"query": "select * from foo;"})
mock_batch.assert_called_once_with(
engine=sqlitedb_engine, schema=None, query="select * from foo;", table_name=None
)
# Normally, we do not allow both query and table_name
with mock.patch(
"great_expectations.dataset.sqlalchemy_dataset.SqlAlchemyBatchReference.__init__",
return_value=None,
) as mock_batch:
datasource.get_batch({"query": "select * from foo;", "table_name": "bar"})
mock_batch.assert_called_once_with(
engine=sqlitedb_engine, schema=None, query="select * from foo;", table_name=None
)
# Snowflake should require query *and* snowflake_transient_table
sqlitedb_engine.dialect.name = "snowflake"
with mock.patch(
"great_expectations.dataset.sqlalchemy_dataset.SqlAlchemyBatchReference.__init__",
return_value=None,
) as mock_batch:
datasource.get_batch(
{"query": "select * from foo;", "snowflake_transient_table": "bar"}
)
mock_batch.assert_called_once_with(
engine=sqlitedb_engine,
schema=None,
query="select * from foo;",
table_name="bar",
)
def test_sqlalchemy_datasource_processes_dataset_options(test_db_connection_string):
datasource = SqlAlchemyDatasource(
"SqlAlchemy", credentials={"url": test_db_connection_string}
)
batch_kwargs = datasource.process_batch_parameters(
dataset_options={"caching": False}
)
batch_kwargs["query"] = "select * from table_1;"
batch = datasource.get_batch(batch_kwargs)
validator = BridgeValidator(batch, ExpectationSuite(expectation_suite_name="foo"))
dataset = validator.get_dataset()
assert dataset.caching is False
batch_kwargs = datasource.process_batch_parameters(
dataset_options={"caching": True}
)
batch_kwargs["query"] = "select * from table_1;"
batch = datasource.get_batch(batch_kwargs)
validator = BridgeValidator(batch, ExpectationSuite(expectation_suite_name="foo"))
dataset = validator.get_dataset()
assert dataset.caching is True
batch_kwargs = {
"query": "select * from table_1;",
"dataset_options": {"caching": False},
}
batch = datasource.get_batch(batch_kwargs)
validator = BridgeValidator(batch, ExpectationSuite(expectation_suite_name="foo"))
dataset = validator.get_dataset()
assert dataset.caching is False
| apache-2.0 | -650,419,896,327,268,100 | 37.724907 | 90 | 0.679274 | false | 3.822752 | true | false | false |
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/conf/locale/da/formats.py | 1 | 1035 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', # '25.10.2006'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| mit | 4,936,223,680,368,423,000 | 33.689655 | 77 | 0.590338 | false | 2.8125 | false | true | false |
F5Networks/f5-common-python | f5/bigip/tm/asm/policies/response_pages.py | 1 | 2162 | # coding=utf-8
#
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from f5.bigip.resource import AsmResource
from f5.bigip.resource import Collection
from f5.sdk_exception import UnsupportedOperation
class Response_Pages_s(Collection):
"""BIG-IP® ASM Response Pages sub-collection."""
def __init__(self, policy):
super(Response_Pages_s, self).__init__(policy)
self._meta_data['object_has_stats'] = False
self._meta_data['minimum_version'] = '11.6.0'
self._meta_data['allowed_lazy_attributes'] = [Response_Page]
self._meta_data['required_json_kind'] = 'tm:asm:policies:response-pages:response-pagecollectionstate'
self._meta_data['attribute_registry'] = {
'tm:asm:policies:response-pages:response-pagestate': Response_Page
}
class Response_Page(AsmResource):
"""BIG-IP® ASM Response Page resource."""
def __init__(self, response_pages_s):
super(Response_Page, self).__init__(response_pages_s)
self._meta_data['required_json_kind'] = 'tm:asm:policies:response-pages:response-pagestate'
def create(self, **kwargs):
"""Create is not supported for Response Page resources
:raises: UnsupportedOperation
"""
raise UnsupportedOperation(
"%s does not support the create method" % self.__class__.__name__
)
def delete(self, **kwargs):
"""Delete is not supported for Response Page resources
:raises: UnsupportedOperation
"""
raise UnsupportedOperation(
"%s does not support the delete method" % self.__class__.__name__
)
| apache-2.0 | -1,259,872,560,900,174,800 | 36.894737 | 109 | 0.674074 | false | 4.007421 | false | false | false |
itsvismay/ElasticBodies | Pipeline/Optimizations/cobyla_beam_heightwidthlength_test.py | 1 | 2030 | from scipy.optimize import fmin_cobyla
import sys, os, subprocess, numpy
P, E = 1000.0, 69e9 # N, Pa, m, m
fileName = 'optimizeTest.txt'
resultName = '../TestsResults/opt.txt'
def objective(x):
height = x[0]
width = x[1]
length = x[2]
volume = length * width * height
return volume
def g0(x):
height = 0.0
width = 0.0
length = 0.0
if type(x) is numpy.ndarray:
height = x[0]
width = x[1]
length = x[2]
else:
height = x[0]
width = x[1]
length = x[2]
print 'Calculating for Height, Width, Length:', height, width, length, '\n'
# fail-safes
if height <= 0.0 or width <= 0.0 or length <= 0.0:
return -100
file_write = open(fileName, 'w')
file_write.write(fileName + ".scad "+str(length)+" "+str(width*1000)+" "+str(height*1000))
file_write.close()
subprocess.check_output(['python', 'pipeline.py', '--template', 'templateBeam.py', '--batch', fileName, '--sConfig', 'slic3rConfig.ini', '--ind', str(height* 1000 + width * 1000 + length * 1000), '-c'])
# read results from file and return those
opt = open(resultName)
for line in opt.readlines():
curLine = line.strip().split(' ')
disp = float(curLine[0]) / 1000
I = width * height**3 / 12
tip_disp = (P * length**3)/(3*E*I)
        print 'Displacement for Height, Width, Length', height, width, length, 'is ::', disp
print 'Analytical Disp for Height, Width, Length', height, width, length, 'is ::', tip_disp, '\n'
return 1e-4 - (float(curLine[0]) / -1000)
return -1000000
def g1(x):
# height > 0.01 m (10 mm)
if x[0] > 0.01 and x[1] > 0.01 and x[2] > 0.01:
return 1
return -1
def g2(x):
# height < 0.5 m (500 mm)
    if x[0] < 0.5 and x[1] < 0.5 and x[2] < 0.5:
return 1
return -1
h0 = [0.02, 0.02, 0.02] # 20 mm
constraints = [g0, g1, g2]
h_opt = fmin_cobyla(objective, h0, constraints, rhoend=1e-6, maxfun=100, catol=1e-6)
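# Added note: fmin_cobyla treats a point as feasible when every constraint
# function returns a non-negative value; g1/g2 therefore return +1 or -1, and
# g0 returns the allowed tip displacement (1e-4 m) minus the simulated one.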
print h_opt, objective(h_opt), g0(h_opt)
| mpl-2.0 | -2,741,182,944,111,957,500 | 28.42029 | 206 | 0.575369 | false | 2.847125 | false | false | false |
rr-/docstring_parser | docstring_parser/numpydoc.py | 1 | 10211 | """Numpydoc-style docstring parsing.
.. seealso:: https://numpydoc.readthedocs.io/en/latest/format.html
"""
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
DocstringStyle,
)
def _pairwise(iterable: T.Iterable, end=None) -> T.Iterable:
a, b = itertools.tee(iterable)
next(b, None)
return itertools.zip_longest(a, b, fillvalue=end)
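# Added usage note (not in the original module): _pairwise pairs each element
# with its successor and pads the last pair with ``end``, e.g.
# list(_pairwise([1, 2, 3])) == [(1, 2), (2, 3), (3, None)].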
def _clean_str(string: str) -> T.Optional[str]:
string = string.strip()
if len(string) > 0:
return string
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
# numpydoc format has no formal grammar for this,
# but we can make some educated guesses...
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
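# Added illustration of what the patterns above capture (examples only):
#   PARAM_KEY_REGEX:      "x : int"       -> name="x", type="int"
#   PARAM_OPTIONAL_REGEX: "int, optional" -> type="int"
#   PARAM_DEFAULT_REGEX:  "Defaults to 3" -> value="3"
#   RETURN_KEY_REGEX:     "result : bool" -> name="result", type="bool"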
class Section:
"""Numpydoc section parser.
:param title: section title. For most sections, this is a heading like
"Parameters" which appears on its own line, underlined by
en-dashes ('-') on the following line.
:param key: meta key string. In the parsed ``DocstringMeta`` instance this
will be the first element of the ``args`` attribute list.
"""
def __init__(self, title: str, key: str) -> None:
self.title = title
self.key = key
@property
def title_pattern(self) -> str:
"""Regular expression pattern matching this section's header.
This pattern will match this instance's ``title`` attribute in
an anonymous group.
"""
return r"^({})\s*?\n{}\s*$".format(self.title, "-" * len(self.title))
def parse(self, text: str) -> T.Iterable[DocstringMeta]:
"""Parse ``DocstringMeta`` objects from the body of this section.
:param text: section body text. Should be cleaned with
``inspect.cleandoc`` before parsing.
"""
yield DocstringMeta([self.key], description=_clean_str(text))
class _KVSection(Section):
"""Base parser for numpydoc sections with key-value syntax.
E.g. sections that look like this:
key
value
key2 : type
values can also span...
... multiple lines
"""
def _parse_item(self, key: str, value: str) -> DocstringMeta:
pass
def parse(self, text: str) -> T.Iterable[DocstringMeta]:
for match, next_match in _pairwise(KV_REGEX.finditer(text)):
start = match.end()
end = next_match.start() if next_match is not None else None
value = text[start:end]
yield self._parse_item(
key=match.group(), value=inspect.cleandoc(value)
)
class _SphinxSection(Section):
"""Base parser for numpydoc sections with sphinx-style syntax.
E.g. sections that look like this:
.. title:: something
possibly over multiple lines
"""
@property
def title_pattern(self) -> str:
return r"^\.\.\s*({})\s*::".format(self.title)
class ParamSection(_KVSection):
"""Parser for numpydoc parameter sections.
E.g. any section that looks like this:
arg_name
arg_description
arg_2 : type, optional
descriptions can also span...
... multiple lines
"""
def _parse_item(self, key: str, value: str) -> DocstringParam:
m = PARAM_KEY_REGEX.match(key)
arg_name = type_name = is_optional = None
if m is not None:
arg_name, type_name = m.group("name"), m.group("type")
if type_name is not None:
optional_match = PARAM_OPTIONAL_REGEX.match(type_name)
if optional_match is not None:
type_name = optional_match.group("type")
is_optional = True
else:
is_optional = False
default = None
if len(value) > 0:
default_match = PARAM_DEFAULT_REGEX.search(value)
if default_match is not None:
default = default_match.group("value")
return DocstringParam(
args=[self.key, arg_name],
description=_clean_str(value),
arg_name=arg_name,
type_name=type_name,
is_optional=is_optional,
default=default,
)
class RaisesSection(_KVSection):
"""Parser for numpydoc raises sections.
E.g. any section that looks like this:
ValueError
A description of what might raise ValueError
"""
def _parse_item(self, key: str, value: str) -> DocstringRaises:
return DocstringRaises(
args=[self.key, key],
description=_clean_str(value),
type_name=key if len(key) > 0 else None,
)
class ReturnsSection(_KVSection):
    """Parser for numpydoc returns sections.
E.g. any section that looks like this:
return_name : type
A description of this returned value
another_type
Return names are optional, types are required
"""
is_generator = False
def _parse_item(self, key: str, value: str) -> DocstringReturns:
m = RETURN_KEY_REGEX.match(key)
if m is not None:
return_name, type_name = m.group("name"), m.group("type")
else:
return_name = type_name = None
return DocstringReturns(
args=[self.key],
description=_clean_str(value),
type_name=type_name,
is_generator=self.is_generator,
return_name=return_name,
)
class YieldsSection(ReturnsSection):
"""Parser for numpydoc generator "yields" sections."""
is_generator = True
class DeprecationSection(_SphinxSection):
"""Parser for numpydoc "deprecation warning" sections."""
def parse(self, text: str) -> T.Iterable[DocstringDeprecated]:
version, desc, *_ = text.split(sep="\n", maxsplit=1) + [None, None]
if desc is not None:
desc = _clean_str(inspect.cleandoc(desc))
yield DocstringDeprecated(
args=[self.key], description=desc, version=_clean_str(version)
)
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class NumpydocParser:
def __init__(self, sections: T.Optional[T.Dict[str, Section]] = None):
"""Setup sections.
:param sections: Recognized sections or None to defaults.
"""
sections = sections or DEFAULT_SECTIONS
self.sections = {s.title: s for s in sections}
self._setup()
def _setup(self):
self.titles_re = re.compile(
r"|".join(s.title_pattern for s in self.sections.values()),
flags=re.M,
)
def add_section(self, section: Section):
"""Add or replace a section.
:param section: The new section.
"""
self.sections[section.title] = section
self._setup()
def parse(self, text: str) -> Docstring:
"""Parse the numpy-style docstring into its components.
:returns: parsed docstring
"""
ret = Docstring(style=DocstringStyle.numpydoc)
if not text:
return ret
# Clean according to PEP-0257
text = inspect.cleandoc(text)
# Find first title and split on its position
match = self.titles_re.search(text)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
# Break description into short and long parts
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith(
"\n"
)
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
for match, nextmatch in _pairwise(self.titles_re.finditer(meta_chunk)):
title = next(g for g in match.groups() if g is not None)
factory = self.sections[title]
# section chunk starts after the header,
# ends at the start of the next header
start = match.end()
end = nextmatch.start() if nextmatch is not None else None
ret.meta.extend(factory.parse(meta_chunk[start:end]))
return ret
def parse(text: str) -> Docstring:
"""Parse the numpy-style docstring into its components.
:returns: parsed docstring
"""
return NumpydocParser().parse(text)
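# Added usage sketch (not part of the original module; attribute access assumes
# the Docstring API from the accompanying ``common`` module):
#   doc = parse("Summary.\n\nParameters\n----------\nx : int\n    The x value.\n")
#   doc.short_description   # -> "Summary."
#   doc.params[0].arg_name  # -> "x"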
| mit | -4,061,001,772,794,959,000 | 29.756024 | 79 | 0.590442 | false | 3.984003 | false | false | false |
low-sky/h2codumb | h2co_mm.py | 1 | 6796 | """
===========================
Formaldehyde mm-line fitter
===========================
This is a formaldehyde 3_03-2_02 / 3_22-2_21 and 3_03-2_02 / 3_21-2_20 fitter.
It is based entirely on RADEX models.
This is the EWR fork of the fitter in pyspeckit.
"""
import numpy as np
import pyspeckit.spectrum.models.hyperfine as hyperfine
from pyspeckit.spectrum.models import fitter,model#,modelgrid
try: # for model grid reading
import astropy.io.fits as pyfits
except ImportError:
import pyfits
try:
import scipy.interpolate
import scipy.ndimage
scipyOK = True
except ImportError:
scipyOK=False
line_names = ['threeohthree','threetwotwo','threetwoone']
# http://adsabs.harvard.edu/abs/1971ApJ...169..429T has the most accurate freqs
# http://adsabs.harvard.edu/abs/1972ApJ...174..463T [twotwo]
central_freq_dict = {
'threeohthree': 218.222192e9,
'threetwotwo': 218.475632e9,
'threetwoone': 218.760066e9,
}
line_strength_dict={
'threeohthree': 1.,
'threetwotwo': 1.,
'threetwoone': 1.,
}
relative_strength_total_degeneracy={
'threeohthree': 1.,
'threetwotwo': 1.,
'threetwoone': 1.,
}
freq_dict = central_freq_dict
aval_dict = {
'threeohthree': 2.818e-4,
'threetwotwo': 1.571e-4,
'threetwoone': 1.577e-4,
}
voff_lines_dict = {
'threeohthree': 0.,
'threetwotwo': 0.,
'threetwoone': 0.,
}
formaldehyde_mm_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict,
freq_dict, line_strength_dict, relative_strength_total_degeneracy)
formaldehyde_mm_vtau_fitter = formaldehyde_mm_vtau.fitter
formaldehyde_mm_vtau_vheight_fitter = formaldehyde_mm_vtau.vheight_fitter
def h2co_mm_radex(xarr,
Temperature=25,
logColumn=13,
logDensity=4,
xoff_v=0.0,
width=1.0,
grid_vwidth=1.0,
gridbundle = None,
debug=False,
verbose=False,
**kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
xoff_v, width are both in km/s
Parameters
----------
grid_vwidth : float
the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
density : float
Density!
"""
# Convert X-units to frequency in GHz
xarr = xarr.as_unit('Hz', quiet=True)
Tex303,Tex322,Tex321,tau303,tau322,tau321 = gridbundle
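    # Added clarification (assumption based on how gridbundle is used below):
    # gridbundle is expected to hold six callables -- e.g. scipy grid
    # interpolators -- giving Tex and tau for the 3_03-2_02, 3_22-2_21 and
    # 3_21-2_20 lines, each evaluated at (logColumn, logDensity, Temperature).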
# if this gets too far different from 1, we are gonna have a Bad Time.
scalefac = grid_vwidth/width
tex = (Tex303(logColumn,logDensity,Temperature),
Tex322(logColumn,logDensity,Temperature),
Tex321(logColumn,logDensity,Temperature))
tau = (tau303(logColumn,logDensity,Temperature)*scalefac,
tau322(logColumn,logDensity,Temperature)*scalefac,
tau321(logColumn,logDensity,Temperature)*scalefac)
if np.any(np.isnan(tex)) or np.any(np.isnan(tau)):
raise ValueError("Invalid column/density")
if verbose:
for ta,tk in zip(tau,tex):
print "density %20.12g temperature %20.12g column %20.12g: tau %20.12g tex %20.12g" % (logDensity, Temperature, logColumn, ta, tk)
if debug:
import pdb; pdb.set_trace()
# here there be physics
ckms = 2.99792458e5
freq_dict = {
'303': 218.222192e9,
'322': 218.475632e9,
'321': 218.760066e9,
}
Tbg = 2.73 #because it totally is
nu0 = np.array([ 218.222192e9, 218.475632e9,218.760066e9])
nuwidth = [width/ckms*nu for nu in nu0]
nuoff = [xoff_v/ckms*nu for nu in nu0]
minfreq = nu0/1e9 - 0.25
maxfreq = nu0/1e9 + 0.25
# spec2 = np.zeros(len(xarr))
# for ii in range(len(nu0)):
# taunu = tau[ii]*np.exp(-(xarr+nuoff[ii]-nu0[ii])**2/(2.0*nuwidth[ii]**2))
# spec2 = spec2 + (1-np.exp(-taunu))*tex[ii] + Tbg*(np.exp(-taunu)-1) #second term assumes an ON-OFF
spec = np.sum([
(formaldehyde_mm_vtau(xarr, Tex=float(tex[ii]), tau=float(tau[ii]),
xoff_v=xoff_v, width=width, **kwargs)
* (xarr.as_unit('GHz')>minfreq[ii]) * (xarr.as_unit('GHz')<maxfreq[ii])) for ii in xrange(len(tex))],
axis=0)
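    # Added note: each component is evaluated with the shared hyperfine model and
    # masked to a 0.5 GHz window (+/- 0.25 GHz) around its rest frequency before
    # the three lines are summed into a single model spectrum.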
# import pdb
# pdb.set_trace()
return spec
def formaldehyde_mm(xarr, amp=1.0, xoff_v=0.0, width=1.0,
return_components=False ):
"""
Generate a model Formaldehyde spectrum based on simple gaussian parameters
the "amplitude" is an essentially arbitrary parameter; we therefore define it to be Tex given tau=0.01 when
passing to the fitter
The final spectrum is then rescaled to that value
The components are independent, but with offsets set by frequency... in principle.
"""
mdl = formaldehyde_vtau(xarr, Tex=amp*0.01, tau=0.01, xoff_v=xoff_v,
width=width,
return_components=return_components)
if return_components:
mdlpeak = np.abs(mdl).squeeze().sum(axis=0).max()
else:
mdlpeak = np.abs(mdl).max()
if mdlpeak > 0:
mdl *= amp/mdlpeak
return mdl
class formaldehyde_mm_model(model.SpectralModel):
pass
formaldehyde_mm_fitter = formaldehyde_mm_model(formaldehyde_mm, 3,
parnames=['amp','center','width'],
parlimited=[(False,False),(False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0)],
shortvarnames=("A","v","\\sigma"), # specify the parameter names (TeX is OK)
fitunits='Hz' )
formaldehyde_mm_vheight_fitter = formaldehyde_mm_model(fitter.vheightmodel(formaldehyde_mm), 4,
parnames=['height','amp','center','width'],
parlimited=[(False,False),(False,False),(False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0), (0,0)],
shortvarnames=("H","A","v","\\sigma"), # specify the parameter names (TeX is OK)
fitunits='Hz' )
try:
import pymodelfit
class pmfFormaldehydeModel(pymodelfit.FunctionModel1DAuto):
def f(self, x, amp0=1.0, xoff_v0=0.0,width0=1.0):
return formaldehyde(x,
amp=amp0,
xoff_v=xoff_v0,width=width0)
class pmfFormaldehydeModelVtau(pymodelfit.FunctionModel1DAuto):
def f(self, x, Tex0=1.0, tau0=0.01, xoff_v0=0.0, width0=1.0):
return formaldehyde_vtau(x,
Tex=Tex0, tau=tau0,
xoff_v=xoff_v0,width=width0)
except ImportError:
pass
| gpl-2.0 | -8,952,912,852,867,932,000 | 31.208531 | 142 | 0.617716 | false | 2.995152 | false | false | false |
vongola12324/Linking-Loader | main.py | 1 | 3185 | import os
import sys
# Open File
filename = ""
filename = input("Enter Input Filename: ")
if filename == "":
filename = "linkin.txt"
fin = open(filename, "r")
fout = open("out.txt", "w")
# Variable Prepare
PGBLOCKS = {}
MODIFY = []
OBJCODE = []
# Method Prepare
def splitline(line):
word = line.strip().split()
return word
def getObjline(START=None):
start = int(START, 16)
for i in OBJCODE:
objstart = int(i.get("START"), 16)
objlen = int(i.get("LENGTH"), 16)
if objstart <= start <= objstart + objlen:
return i
else:
continue
def toSignedInt(hexstr):
i = int(hexstr, 16)
if i > 0x7FFFFF:
i -= 0x1000000
return i
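# Added note: toSignedInt interprets the hex string as a 24-bit two's-complement
# value, e.g. toSignedInt("FFFFFF") == -1 and toSignedInt("000010") == 16.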
def toSignedHex(num):
return hex(((abs(num) ^ 0xffff) + 1) & 0xffff)
# Program Start
Offset = input("Enter Program Start Address: ")
Offset = int(Offset, 16)
Length = 0
while True:
line = fin.readline()
if not line:
break
else:
if line[0] == "H":
word = splitline(line)
PGBLOCKS.update({word[1]: hex(int(word[2], 16) + Offset)[2:].upper()})
Length = int(word[3], 16)
elif line[0] == "D":
word = splitline(line)
for i in range(1, len(word), 2):
PGBLOCKS.update({word[i]: word[i + 1]})
elif line[0] == "R":
continue
elif line[0] == "E":
Offset += Length
continue
elif line[0] == "T":
word = splitline(line)
string = ""
for i in range(3, len(word)):
string += word[i]
head = hex(int(word[1], 16) + Offset)[2:].upper()
while len(head) < 6:
head = "0" + head
OBJCODE.append({"START": head, "LENGTH": word[2], "OBJC": string})
else:
word = splitline(line)
if word != []:
MODIFY.append(
{"ADDR": hex(toSignedInt(word[1]) + Offset), "LENGTH": word[2], "OPER": word[3], "PGB": word[4]})
fin.close()
for i in MODIFY:
ObjLine = getObjline(i.get("ADDR"))
Objc = ObjLine.get("OBJC")
selectStart = (int(i.get("ADDR"), 16) - int("0x" + ObjLine.get("START"), 16)) * 2
if int(i.get("LENGTH"), 16) % 2 == 1:
selectStart += 1
ModObjc = Objc[selectStart:selectStart + int(i.get("LENGTH"), 16)]
PGB = PGBLOCKS.get(i.get("PGB"))
if i.get("OPER") == "+":
ModObjc = toSignedHex(toSignedInt(ModObjc) + toSignedInt(PGB))[2:].upper()
else:
ModObjc = toSignedHex(toSignedInt(ModObjc) - toSignedInt(PGB))[2:].upper()
while len(ModObjc) < int(i.get("LENGTH"), 16):
ModObjc = "0" + ModObjc
ObjLine.update({"OBJC": Objc[:selectStart] + ModObjc + Objc[selectStart + int(i.get("LENGTH"), 16):]})
for i in OBJCODE:
Objc = i.get("OBJC")
while len(Objc) < 32:
Objc += "."
i.update({"OBJC": Objc})
fout.write(
"{0:<06s} {1:<8s} {2:<8s} {3:<8s} {4:<8s}\n".format(i.get("START"), i.get("OBJC")[0:8], i.get("OBJC")[8:16],
i.get("OBJC")[16:24], i.get("OBJC")[24:32]))
fout.close()
| gpl-3.0 | 8,571,556,472,533,741,000 | 28.229358 | 122 | 0.513344 | false | 3.113392 | false | false | false |
miyataken999/weblate | weblate/trans/admin.py | 1 | 8516 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.contrib import admin
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from weblate.trans.models import (
Project, SubProject, Translation, Advertisement,
Unit, Suggestion, Comment, Check, Dictionary, Change,
Source, WhiteboardMessage
)
class ProjectAdmin(admin.ModelAdmin):
list_display = (
'name', 'slug', 'web', 'list_owners', 'enable_acl', 'enable_hooks',
'num_vcs', 'num_strings', 'num_words', 'num_langs',
)
prepopulated_fields = {'slug': ('name',)}
search_fields = ['name', 'slug', 'web']
actions = ['update_from_git', 'update_checks', 'force_commit']
def list_owners(self, obj):
return ', '.join(obj.owners.values_list('username', flat=True))
list_owners.short_description = _('Owners')
def num_vcs(self, obj):
return obj.subproject_set.exclude(repo__startswith='weblate:/').count()
num_vcs.short_description = _('VCS repositories')
def num_strings(self, obj):
return obj.get_total()
num_strings.short_description = _('Source strings')
def num_words(self, obj):
return obj.get_total_words()
num_words.short_description = _('Source words')
def num_langs(self, obj):
return obj.get_language_count()
num_langs.short_description = _('Languages')
def update_from_git(self, request, queryset):
"""
Updates selected components from git.
"""
for project in queryset:
project.do_update(request)
self.message_user(request, "Updated %d git repos." % queryset.count())
update_from_git.short_description = _('Update VCS repository')
def update_checks(self, request, queryset):
"""
Recalculates checks for selected components.
"""
cnt = 0
units = Unit.objects.filter(
translation__subproject__project__in=queryset
)
for unit in units.iterator():
unit.run_checks()
cnt += 1
self.message_user(request, "Updated checks for %d units." % cnt)
update_checks.short_description = _('Update quality checks')
def force_commit(self, request, queryset):
"""
Commits pending changes for selected components.
"""
for project in queryset:
project.commit_pending(request)
self.message_user(
request,
"Flushed changes in %d git repos." % queryset.count()
)
force_commit.short_description = _('Commit pending changes')
class SubProjectAdmin(admin.ModelAdmin):
list_display = [
'name', 'slug', 'project', 'repo', 'branch', 'vcs', 'file_format'
]
prepopulated_fields = {'slug': ('name',)}
search_fields = ['name', 'slug', 'repo', 'branch']
list_filter = ['project', 'vcs', 'file_format']
actions = ['update_from_git', 'update_checks', 'force_commit']
def update_from_git(self, request, queryset):
"""
Updates selected components from git.
"""
for project in queryset:
project.do_update(request)
self.message_user(request, "Updated %d git repos." % queryset.count())
update_from_git.short_description = _('Update VCS repository')
def update_checks(self, request, queryset):
"""
Recalculates checks for selected components.
"""
cnt = 0
units = Unit.objects.filter(
translation__subproject__in=queryset
)
for unit in units.iterator():
unit.run_checks()
cnt += 1
self.message_user(
request,
"Updated checks for %d units." % cnt
)
update_checks.short_description = _('Update quality checks')
def force_commit(self, request, queryset):
"""
Commits pending changes for selected components.
"""
for project in queryset:
project.commit_pending(request)
self.message_user(
request,
"Flushed changes in %d git repos." % queryset.count()
)
force_commit.short_description = _('Commit pending changes')
class TranslationAdmin(admin.ModelAdmin):
list_display = [
'subproject', 'language', 'translated', 'total',
'fuzzy', 'revision', 'filename', 'enabled'
]
search_fields = [
'subproject__slug', 'language__code', 'revision', 'filename'
]
list_filter = ['enabled', 'subproject__project', 'subproject', 'language']
actions = ['enable_translation', 'disable_translation']
def enable_translation(self, request, queryset):
"""
Mass enabling of translations.
"""
queryset.update(enabled=True)
self.message_user(
request,
"Enabled %d translations." % queryset.count()
)
def disable_translation(self, request, queryset):
"""
Mass disabling of translations.
"""
queryset.update(enabled=False)
self.message_user(
request,
"Disabled %d translations." % queryset.count()
)
class UnitAdmin(admin.ModelAdmin):
list_display = ['source', 'target', 'position', 'fuzzy', 'translated']
search_fields = ['source', 'target', 'checksum']
list_filter = [
'translation__subproject',
'translation__language',
'fuzzy',
'translated'
]
class SuggestionAdmin(admin.ModelAdmin):
list_display = ['contentsum', 'target', 'project', 'language', 'user']
list_filter = ['project', 'language']
search_fields = ['contentsum', 'target']
class CommentAdmin(admin.ModelAdmin):
list_display = [
'contentsum', 'comment', 'user', 'project', 'language', 'user'
]
list_filter = ['project', 'language']
search_fields = ['contentsum', 'comment']
class CheckAdmin(admin.ModelAdmin):
list_display = ['contentsum', 'check', 'project', 'language', 'ignore']
search_fields = ['contentsum', 'check']
list_filter = ['check', 'project', 'ignore']
class DictionaryAdmin(admin.ModelAdmin):
list_display = ['source', 'target', 'project', 'language']
search_fields = ['source', 'target']
list_filter = ['project', 'language']
class ChangeAdmin(admin.ModelAdmin):
list_display = ['unit', 'user', 'timestamp']
date_hierarchy = 'timestamp'
list_filter = [
'unit__translation__subproject',
'unit__translation__subproject__project',
'unit__translation__language'
]
raw_id_fields = ('unit',)
class WhiteboardAdmin(admin.ModelAdmin):
list_display = ['message', 'project', 'subproject', 'language']
prepopulated_fields = {}
search_fields = ['message']
list_filter = ['project', 'language']
class AdvertisementAdmin(admin.ModelAdmin):
list_display = ['placement', 'date_start', 'date_end', 'text']
search_fields = ['text', 'note']
date_hierarchy = 'date_end'
class SourceAdmin(admin.ModelAdmin):
list_display = ['checksum', 'priority', 'timestamp']
date_hierarchy = 'timestamp'
# Register in admin interface
admin.site.register(Project, ProjectAdmin)
admin.site.register(SubProject, SubProjectAdmin)
admin.site.register(Advertisement, AdvertisementAdmin)
admin.site.register(WhiteboardMessage, WhiteboardAdmin)
# Show some controls only in debug mode
if settings.DEBUG:
admin.site.register(Translation, TranslationAdmin)
admin.site.register(Unit, UnitAdmin)
admin.site.register(Suggestion, SuggestionAdmin)
admin.site.register(Comment, CommentAdmin)
admin.site.register(Check, CheckAdmin)
admin.site.register(Dictionary, DictionaryAdmin)
admin.site.register(Change, ChangeAdmin)
admin.site.register(Source, SourceAdmin)
| gpl-3.0 | 8,613,016,902,550,696,000 | 32.124514 | 79 | 0.633267 | false | 3.989222 | false | false | false |
Gaha/intranet | ce/models.py | 1 | 2664 | #-*- coding:utf-8 -*-
from datetime import datetime
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Agent(models.Model):
LISTE_CONTRAT = (
('CDI', 'CDI'),
('CDD', 'CDD'),
('PRE', 'Prestataire'),
('INT', 'Intérime'),
)
nom = models.CharField(max_length=50)
prenom = models.CharField(max_length=50)
contrat = models.CharField(max_length=3, choices=LISTE_CONTRAT, default='CDI')
    # this is what gets displayed when printed, notably in the admin
def __unicode__(self):
return u'%s %s' % (self.nom, self.prenom)
class Mendat(models.Model):
LISTE_MENDAT = (
('DUP_PR', 'DUP Président'),
('DUP_SE', 'DUP Secrétaire'),
('DUP_TR', 'DUP Trésorier'),
('CA_TIT', 'DUP Cadre Titulaire'),
('CA_SUP', 'DUP Cadre Suppléant'),
('AG_TIT', 'DUP Agent Titulaire'),
('AG_SUP', 'DUP Agent Suppléant'),
('DS', 'Délégués Syndical'),
('CHS_PR', 'CHSCT Président'),
('CHS_SE', 'CHSCT Secrétaire'),
('CHS_ME', 'CHSCT Membres'),
)
nom = models.ForeignKey('Agent')
mendat = models.CharField(max_length=6, choices=LISTE_MENDAT)
def __unicode__(self):
return u'%s - %s' % (self.nom, self.mendat)
class Commission(models.Model):
nom = models.CharField(max_length=50)
def __unicode__(self):
return u'%s' % (self.nom)
class CommissionMembre(models.Model):
LISTE_MEMBRE = (
('PRE', 'Président'),
('DUP', 'Membre DUP'),
('AGE', 'Membre Agent')
)
commission = models.ForeignKey('Commission')
agent = models.ForeignKey('Agent')
membre = models.CharField(max_length=3, choices=LISTE_MEMBRE)
def __unicode__(self):
return u'%s : %s - %s' % (self.commission, self.agent, self.membre)
class Activitee(models.Model):
nom = models.CharField(max_length=50)
commission = models.ForeignKey('Commission')
date = models.DateField()
heure = models.TimeField()
def __unicode__(self):
return u'%s : %s' % (self.date, self.nom)
class Participation(models.Model):
LISTE_ETAT = (
('IN', 'Inscrit'),
('AN', 'Annuler'),
)
nom = models.ForeignKey('Agent')
activitee = models.ForeignKey('Activitee')
agent = models.IntegerField(default=1)
conjoint = models.IntegerField(default=0)
enfant = models.IntegerField(default=0)
externe = models.IntegerField(default=0)
etat = models.CharField(max_length=2, choices=LISTE_ETAT, default='IN')
def __unicode__(self):
return u'%s : %s' % (self.activitee, self.nom)
| gpl-2.0 | 4,657,869,429,667,921,000 | 27.826087 | 82 | 0.597662 | false | 3.073001 | false | false | false |
effigies/mne-python | examples/realtime/ftclient_rt_average.py | 2 | 2816 | """
========================================================
Compute real-time evoked responses with FieldTrip client
========================================================
This example demonstrates how to connect the MNE real-time
system to the Fieldtrip buffer using the FieldTripClient class.
This example was tested in simulation mode
neuromag2ft --file MNE-sample-data/MEG/sample/sample_audvis_raw.fif
using a modified version of neuromag2ft available at
http://neuro.hut.fi/~mainak/neuromag2ft-2.0.0.zip
to run the FieldTrip buffer. Then running this example acquires the
data on the client side.
Since the Fieldtrip buffer does not contain all the
measurement information required by the MNE real-time processing
pipeline, an info dictionary must be provided to instantiate FieldTripClient.
Alternatively, the MNE-Python script will try to guess the missing
measurement info from the Fieldtrip Header object.
Together with RtEpochs, this can be used to compute evoked
responses using moving averages.
"""
print(__doc__)
# Author: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne.viz import plot_events
from mne.realtime import FieldTripClient, RtEpochs
import matplotlib.pyplot as plt
# select the left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
# user must provide list of bad channels because
# FieldTrip header object does not provide that
bads = ['MEG 2443', 'EEG 053']
plt.ion() # make plot interactive
_, ax = plt.subplots(2, 1, figsize=(8, 8)) # create subplots
with FieldTripClient(host='localhost', port=1972,
tmax=150, wait_max=10) as rt_client:
# get measurement info guessed by MNE-Python
raw_info = rt_client.get_measurement_info()
# select gradiometers
picks = mne.pick_types(raw_info, meg='grad', eeg=False, eog=True,
stim=True, exclude=bads)
# create the real-time epochs object
rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax,
stim_channel='STI 014', picks=picks,
reject=dict(grad=4000e-13, eog=150e-6),
decim=1, isi_max=10.0, proj=None)
# start the acquisition
rt_epochs.start()
for ii, ev in enumerate(rt_epochs.iter_evoked()):
print("Just got epoch %d" % (ii + 1))
if ii > 0:
ev += evoked
evoked = ev
ax[0].cla(), ax[1].cla() # clear axis
plot_events(rt_epochs.events[-5:], sfreq=ev.info['sfreq'],
first_samp=-rt_client.tmin_samp, axes=ax[0])
evoked.plot(axes=ax[1]) # plot on second subplot
ax[1].set_title('Evoked response for gradiometer channels'
'(event_id = %d)' % event_id)
plt.pause(0.05)
plt.draw()
plt.close()
| bsd-3-clause | 8,140,795,396,231,081,000 | 30.288889 | 77 | 0.638849 | false | 3.506849 | false | false | false |
justmedude/librenms | services-wrapper.py | 1 | 13762 | #! /usr/bin/env python3
"""
services-wrapper A small tool which wraps around check-services.php and tries to
guide the services process with a more modern approach with a
Queue and workers.
Based on the original version of poller-wrapper.py by Job Snijders
Author: Neil Lathwood <[email protected]>
Orsiris de Jong <[email protected]>
Date: Oct 2019
Usage: This program accepts one command line argument: the number of threads
that should run simultaneously. If no argument is given it will assume
a default of 1 thread.
Ubuntu Linux: apt-get install python-mysqldb
FreeBSD: cd /usr/ports/*/py-MySQLdb && make install clean
RHEL 7: yum install MySQL-python
RHEL 8: dnf install mariadb-connector-c-devel gcc && python -m pip install mysqlclient
Tested on: Python 3.6.8 / PHP 7.2.11 / CentOS 8
License: This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/.
LICENSE.txt contains a copy of the full GPLv3 licensing conditions.
"""
import LibreNMS.library as LNMS
try:
import json
import os
import queue
import subprocess
import sys
import threading
import time
from optparse import OptionParser
except ImportError as exc:
print('ERROR: missing one or more of the following python modules:')
print('threading, queue, sys, subprocess, time, os, json')
print('ERROR: %s' % exc)
sys.exit(2)
APP_NAME = "services_wrapper"
LOG_FILE = "logs/" + APP_NAME + ".log"
_DEBUG = False
servicedisco = False
real_duration = 0
service_devices = 0
"""
Threading helper functions
"""
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC0
def memc_alive():
try:
global memc
key = str(uuid.uuid4())
memc.set('poller.ping.' + key, key, 60)
if memc.get('poller.ping.' + key) == key:
memc.delete('poller.ping.' + key)
return True
else:
return False
except:
return False
def memc_touch(key, time):
try:
global memc
val = memc.get(key)
memc.set(key, val, time)
except:
pass
def get_time_tag(step):
ts = int(time.time())
return ts - ts % step
#EOC0
"""
A seperate queue and a single worker for printing information to the screen prevents
the good old joke:
Some people, when confronted with a problem, think,
"I know, I'll use threads," and then they two they hav erpoblesms.
"""
def printworker():
nodeso = 0
while True:
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC4
global IsNode
global servicedisco
if servicedisco:
if not IsNode:
memc_touch('service.master', 10)
nodes = memc.get('service.nodes')
if nodes is None and not memc_alive():
print("WARNING: Lost Memcached. Taking over all devices. Nodes will quit shortly.")
servicedisco = False
nodes = nodeso
if nodes is not nodeso:
print("INFO: %s Node(s) Total" % (nodes))
nodeso = nodes
else:
memc_touch('service.nodes', 10)
try:
worker_id, device_id, elapsed_time = print_queue.get(False)
except:
pass
try:
time.sleep(1)
except:
pass
continue
else:
worker_id, device_id, elapsed_time = print_queue.get()
# EOC4
global real_duration
global per_device_duration
global service_devices
real_duration += elapsed_time
per_device_duration[device_id] = elapsed_time
service_devices += 1
if elapsed_time < 300:
print("INFO: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time))
else:
print("WARNING: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time))
print_queue.task_done()
"""
This class will fork off single instances of the check-services.php process, record
how long it takes, and push the resulting reports to the printer queue
"""
def poll_worker():
while True:
device_id = poll_queue.get()
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC5
if not servicedisco or memc.get('service.device.' + str(device_id)) is None:
if servicedisco:
result = memc.add('service.device.' + str(device_id), config['distributed_poller_name'], 300)
if not result:
print("This device (%s) appears to be being service checked by another service node" % (device_id))
poll_queue.task_done()
continue
if not memc_alive() and IsNode:
print("Lost Memcached, Not service checking Device %s as Node. Master will check it." % device_id)
poll_queue.task_done()
continue
# EOC5
try:
start_time = time.time()
output = "-d >> %s/services_device_%s.log" % (log_dir, device_id) if debug else ">> /dev/null"
# TODO replace with command_runner
command = "/usr/bin/env php %s -h %s %s 2>&1" % (service_path, device_id, output)
subprocess.check_call(command, shell=True)
elapsed_time = int(time.time() - start_time)
print_queue.put([threading.current_thread().name, device_id, elapsed_time])
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
poll_queue.task_done()
if __name__ == '__main__':
logger = LNMS.logger_get_logger(LOG_FILE, debug=_DEBUG)
install_dir = os.path.dirname(os.path.realpath(__file__))
LNMS.check_for_file(install_dir + '/config.php')
config = json.loads(LNMS.get_config_data(install_dir))
service_path = config['install_dir'] + '/check-services.php'
log_dir = config['log_dir']
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC1
if 'distributed_poller_group' in config:
service_group = str(config['distributed_poller_group'])
else:
service_group = False
if ('distributed_poller' in config and
'distributed_poller_memcached_host' in config and
'distributed_poller_memcached_port' in config and
config['distributed_poller']):
try:
import memcache
import uuid
memc = memcache.Client([config['distributed_poller_memcached_host'] + ':' +
str(config['distributed_poller_memcached_port'])])
if str(memc.get("service.master")) == config['distributed_poller_name']:
print("This system is already joined as the service master.")
sys.exit(2)
if memc_alive():
if memc.get("service.master") is None:
print("Registered as Master")
memc.set("service.master", config['distributed_poller_name'], 10)
memc.set("service.nodes", 0, 300)
IsNode = False
else:
print("Registered as Node joining Master %s" % memc.get("service.master"))
IsNode = True
memc.incr("service.nodes")
servicedisco = True
else:
print("Could not connect to memcached, disabling distributed service checks.")
servicedisco = False
IsNode = False
except SystemExit:
raise
except ImportError:
print("ERROR: missing memcache python module:")
print("On deb systems: apt-get install python3-memcache")
print("On other systems: pip3 install python-memcached")
print("Disabling distributed discovery.")
servicedisco = False
else:
servicedisco = False
# EOC1
s_time = time.time()
real_duration = 0
per_device_duration = {}
service_devices = 0
"""
Take the amount of threads we want to run in parallel from the commandline
if None are given or the argument was garbage, fall back to default of 16
"""
usage = "usage: %prog [options] <workers> (Default: 1 (Do not set too high)"
description = "Spawn multiple check-services.php processes in parallel."
parser = OptionParser(usage=usage, description=description)
parser.add_option('-d', '--debug', action='store_true', default=False,
help="Enable debug output. WARNING: Leaving this enabled will consume a lot of disk space.")
(options, args) = parser.parse_args()
debug = options.debug
try:
amount_of_workers = int(args[0])
except (IndexError, ValueError):
amount_of_workers = 1
devices_list = []
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC2
if service_group is not False:
query = "SELECT DISTINCT(`services`.`device_id`) FROM `services` LEFT JOIN `devices` ON `services`.`device_id` = `devices`.`device_id` WHERE `devices`.`poller_group` IN(" + service_group + ") AND `devices`.`disabled` = 0"
else:
query = "SELECT DISTINCT(`services`.`device_id`) FROM `services` LEFT JOIN `devices` ON `services`.`device_id` = `devices`.`device_id` WHERE `devices`.`disabled` = 0"
# EOC2
db = LNMS.db_open(config['db_socket'], config['db_host'], config['db_port'], config['db_user'], config['db_pass'], config['db_name'])
cursor = db.cursor()
cursor.execute(query)
devices = cursor.fetchall()
for row in devices:
devices_list.append(int(row[0]))
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC3
if servicedisco and not IsNode:
query = "SELECT MAX(`device_id`), MIN(`device_id`) FROM `services`"
cursor.execute(query)
devices = cursor.fetchall()
maxlocks = devices[0][0] or 0
minlocks = devices[0][1] or 0
# EOC3
db.close()
poll_queue = queue.Queue()
print_queue = queue.Queue()
print("INFO: starting the service check at %s with %s threads" % (time.strftime("%Y-%m-%d %H:%M:%S"),
amount_of_workers))
for device_id in devices_list:
poll_queue.put(device_id)
for i in range(amount_of_workers):
t = threading.Thread(target=poll_worker)
t.setDaemon(True)
t.start()
p = threading.Thread(target=printworker)
p.setDaemon(True)
p.start()
try:
poll_queue.join()
print_queue.join()
except (KeyboardInterrupt, SystemExit):
raise
total_time = int(time.time() - s_time)
print("INFO: services-wrapper checked %s devices in %s seconds with %s workers" % (service_devices, total_time, amount_of_workers))
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC6
if servicedisco or memc_alive():
master = memc.get("service.master")
if master == config['distributed_poller_name'] and not IsNode:
print("Wait for all service-nodes to finish")
nodes = memc.get("service.nodes")
while nodes is not None and nodes > 0:
try:
time.sleep(1)
nodes = memc.get("service.nodes")
except:
pass
print("Clearing Locks")
x = minlocks
while x <= maxlocks:
memc.delete('service.device.' + str(x))
x = x + 1
print("%s Locks Cleared" % x)
print("Clearing Nodes")
memc.delete("service.master")
memc.delete("service.nodes")
else:
memc.decr("service.nodes")
print("Finished %s." % time.time())
# EOC6
show_stopper = False
if total_time > 300:
print("WARNING: the process took more than 5 minutes to finish, you need faster hardware or more threads")
print("INFO: in sequential style service checks the elapsed time would have been: %s seconds" % real_duration)
for device in per_device_duration:
if per_device_duration[device] > 300:
print("WARNING: device %s is taking too long: %s seconds" % (device, per_device_duration[device]))
show_stopper = True
if show_stopper:
print("ERROR: Some devices are taking more than 300 seconds, the script cannot recommend you what to do.")
else:
recommend = int(total_time / 300.0 * amount_of_workers + 1)
print(
"WARNING: Consider setting a minimum of %d threads. (This does not constitute professional advice!)" % recommend)
sys.exit(2)
| gpl-3.0 | -8,893,147,896,201,469,000 | 36.807692 | 229 | 0.58102 | false | 4.02045 | true | false | false |
mohou/Mohou_Box-master | boxPrint/print_service/__init__.py | 1 | 10414 | # coding=utf-8
#from threading import Thread
import Queue
import sys
import time
import logging
import re
import os
import psutil
class PrintService(object):
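    """Wraps a concrete printer driver module and exposes connect, print, and status operations."""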
def __init__(self, profile, serialInfo):
# super(PrintService, self).__init__(name="PrintService")
self.profile = profile
self.serialInfo = serialInfo
self.printer = None
self.logger = logging.getLogger(__name__)
# self.stopFlag = False
# self.command_queue = Queue.PriorityQueue()
# def run(self):
# while True:
# if self.stopFlag:
# break
# (command, payload) = self.command_queue.get(True)
# print "command: %s" % str(command)
# print "payload: %s" % str(payload)
# method = getattr(self, command, None)
# if not method:
# print "Unkown command: %s!" % command
# continue
# try:
# method(payload)
# except Exception as e:
# print "Exception: %s." % e.message
# else:
# pass
#
# # Stop print service.
def stop(self):
# self.stopFlag = True
self.disconnectPrinter()
#
# # Send command to queue.
# def connect(self, payload=None):
# self.command_queue.put(("connectPrinter", payload), 0)
#
# def disconnect(self, payload=None):
# self.command_queue.put(("disconnectPrinter", payload), 0)
#
# def start(self, payload=None):
# self.command_queue.put(("startPrint", payload), 0)
#
# def pause(self, payload=None):
# self.command_queue.put(("pausePrint", payload), 0)
#
# def unpause(self, payload=None):
# self.command_queue.put(("unpausePrint", payload), 0)
#
# def cancel(self, payload=None):
# self.command_queue.put(("cancelPrint", payload), 0)
#
# def execute(self, payload):
# self.command_queue.put(("executeCommand", payload), 0)
# Execute printer command.
def connectPrinter(self, playload=None):
ret = False
if (self.profile['driver'] is not None) and (self.serialInfo['COM'] is not None):
if self.printer is not None:
self.disconnectPrinter()
time.sleep(0.1)
try:
printer_class = __import__(self.profile['driver'])
except ImportError as ie:
self.logger.error("Printer type %s not supported." % self.profile['driver'])
self.logger.error("Import error: %s" % ie.message)
else:
try:
self.printer = printer_class.Printer(self.profile, self.serialInfo)
except RuntimeError as e:
message = "Can't connect to printer %s %s\nReason: %s" % (self.profile['name'], str(self.serialInfo), e.message)
self.logger.error(message)
except Exception:
message = "Unexpected error while connecting to %s: %s" % (self.profile['name'], sys.exc_info()[1])
self.logger.error(message)
else:
message = "Successful connection to %s!" % (self.profile['name'])
self.logger.info(message)
ret = True
return ret
def disconnectPrinter(self, playload=None):
if self.printer is None:
return
#if self.printer.is_operational():
self.printer.close()
self.printer = None
def startPrint(self, payload):
if self.printer is None:
return
if payload['filetype'] == 'gcode':
self.printer.gcodes(self.printer.resource_url + payload['res_id'], is_link = True, file_type=payload['filetype'], res_id=payload['res_id'])
else:
self.printer.gcodes(self.printer.resource_url + payload['res_id'], is_link = True, file_type=payload['filetype'], res_id=payload['res_id'],\
slc_id=payload['slc_id'], slc_flag=int(payload['slc_flag']), slc_lines=int(payload['slc_lines']), slc_ptime=int(payload['slc_ptime']))
def pausePrint(self, payload=None):
if self.printer is None:
return
self.printer.pause()
def unpausePrint(self, payload=None):
if self.printer is None:
return
self.printer.unpause()
def cancelPrint(self, payload=None):
if self.printer is None:
return
self.printer.cancel()
def executeCommand(self, payload):
if self.printer is None:
return
self.printer.unbuffered_gcodes(payload)
def removeFile(self, payload):
if self.printer is None:
return
self.printer.removeFile(payload)
def toOperational(self, payload=None):
if self.printer is None:
return
self.printer.toOperational()
def getStatus(self):
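        """Collect printer state plus host system metrics (disk, memory, CPU, local IP) into a dict."""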
data = {
"boxid": self.profile['boxid'],
"name": self.profile['box_name'],
"port": "",
"baudrate": "",
"pid": "",
"pname": "",
"vid": "",
"vname": "",
"app_ver": "1.0.1",
#"proto_ver": "1.0.0",
"bed_temperature": 0,
"target_bed_temperature": 0,
"temperature1": 0,
"target_temperature1": 0,
"temperature2": 0,
"target_temperature2": 0,
"extruder_amount": 1,
"printer_state": 1,
"print_progress": 0,
"print_speed": 0,
"fan_speed": 0,
"print_time_escape": "00:00:00",
"print_time_remain": "00:00:00",
'cpu_usage': 0,
'disk_size': 0,
'free_disk_size': 0,
'mem_size': 0,
'free_mem_size': 0,
'loc_ip': "127.0.0.1",
}
if self.printer is None:
data["printer_state"] = 1
else:
self.printer.read_state()
try:
data["bed_temperature"] = self.printer.temps[0]
data["target_bed_temperature"] = self.printer.target_temps[0]
data["temperature1"] = self.printer.temps[1]
data["target_temperature1"] = self.printer.target_temps[1]
data["temperature2"] = self.printer.temps[2]
data["target_temperature2"] = self.printer.target_temps[2]
except Exception as ex:
pass
data["extruder_amount"] = self.printer.extruder_amount
data["printer_state"] = self.printer.printer_state
data["print_progress"] = self.printer.print_progress
data["print_speed"] = self.printer.print_speed
data["fan_speed"] = self.printer.fan_speed
if hasattr(self.printer, "print_time_escape"):
data["print_time_escape"] = self.printer.print_time_escape
if hasattr(self.printer, "print_time_remain"):
data["print_time_remain"] = self.printer.print_time_remain
hddinfo = os.statvfs(self.printer.model_file_path)
data['disk_size'] = hddinfo.f_frsize * hddinfo.f_blocks / 1024
            #Remaining storage space, in KB
data['free_disk_size'] = hddinfo.f_frsize * hddinfo.f_bavail / 1024
            #Total memory, in KB
phymem = psutil.virtual_memory()
            #Remaining memory, in KB
data['mem_size'] = phymem.total / 1024
data['free_mem_size'] = phymem.free / 1024
            #CPU usage as a percentage, e.g. 60% is reported as 60
data['port'] = self.serialInfo["COM"]
data['baudrate'] = self.printer.correct_baudrate
data['cpu_usage'] = psutil.cpu_percent()
data['pid'] = self.serialInfo["PID"]
data['vid'] = self.serialInfo["VID"]
            #Local network IP, e.g. 192.168.1.100
text = os.popen("ifconfig eth0").read()
reg_eth0 = re.match(r".*addr:(.*) Bcast:.*Mask:(.*)", text, re.S)
text = os.popen("ifconfig wlan0").read()
reg_wlan0 = re.match(r".*addr:(.*) Bcast:.*Mask:(.*)", text, re.S)
if reg_wlan0:
data['loc_ip'] = reg_wlan0.group(1)
elif reg_eth0:
data['loc_ip'] = reg_eth0.group(1)
else:
data['loc_ip'] = "127.0.0.1"
return data
def goHome(self):
if self.printer is None:
return
self.printer.goHome()
def goXYHome(self):
if self.printer is None:
return
self.printer.goXYHome()
def goZHome(self):
if self.printer is None:
return
self.printer.goZHome()
def goXPosition(self, pos):
if self.printer is None:
return
self.printer.goXPosition(pos)
def goYPosition(self, pos):
if self.printer is None:
return
self.printer.goYPosition(pos)
def goZPosition(self, pos):
if self.printer is None:
return
self.printer.goZPosition(pos)
def goEOperation(self, e, length):
if self.printer is None:
return
self.printer.goEOperation(e, length)
def setBedTargetTemp(self, temp):
if self.printer is None:
return
self.printer.setBedTargetTemp(temp)
def setETargetTemp(self, e, temp):
if self.printer is None:
return
self.printer.setETargetTemp(e, temp)
def setSpeedFactor(self, speedfactor):
if self.printer is None:
return
self.printer.setSpeedFactor(speedfactor)
| apache-2.0 | 6,482,249,820,652,363,000 | 35.194245 | 163 | 0.499516 | false | 3.915184 | false | false | false |
IcyMint/Barchine | Barchine_gui.py | 1 | 66916 | import PySimpleGUI as sg
from Ingredient_Library import restoreIngredientLibrary, storeIngredientLibrary, createIngredient
from Ingredient_Library import listIngredients, getFamilyTypes, getBaseTypes, deleteIngredient
from Ingredient_Library import restoreBases
from Drink_Library import restoreDrinkLibrary, storeDrinkLibrary, listDrinks, deleteDrink, getIceTypes, getGlassTypes, createDrink
import Bartender
from Keypad import Keypad
import sys
import os
from pathlib import Path
import re
from Logging import log
#Initialize display properties
if os.environ.get('DISPLAY','') == '':
print('no display found. Using :0.0')
    os.environ['DISPLAY'] = ':0.0'
sg.theme('DarkAmber')
#Fullscreen selector
FULLSCREEN = False
#Screen Resolution
RESOLUTION = {'x':800,'y':480}
#Load library information
restoreBases()
restoreIngredientLibrary()
restoreDrinkLibrary()
def contextSwitcher(current, target, window):
    """Switch from the screen named by 'current' to the screen requested by the 'target' button key."""
    #Map each menu button prefix to the screen constructor it opens
    screens = {
        'Home': HomeGUI,
        'Library': LibraryGUI,
        'Ingredients': IngredientsGUI,
        'Stations': StationsGUI,
        'Stats': StatsGUI,
        'Settings': SettingsGUI,
    }
    #Button keys look like 'Library_home'; the prefix names the destination screen
    destination = target.split('_')[0]
    #Only switch when the destination differs from the screen currently shown
    if destination in screens and not current.startswith(destination + '_'):
        screens[destination](window)
def HomeGUI(prev_window):
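    """Main menu screen: browse the drink menu, view recipe details, and place orders."""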
filtered = True
shelf = {}
#Get a dict of shelf names
for element in Bartender.getShelf():
if(element is not None):
shelf[element.getBase()] = ''
drinkInfo_home = [
[sg.Text('-DRINK_NAME-',key='DRINK_NAME_home',font=('Helvetica', 15),size=(15,1))],
[sg.Text('-ICE_NAME-',key='ICE_NAME_home',size=(15,1))],
[sg.Text('-GLASS_NAME-',key='GLASS_NAME_home',size=(18,1))],
[sg.Text('-GARNISH_NAME-',key='GARNISH_NAME_home',size=(15,1),relief='ridge',enable_events=True)],
[sg.Text('-EXTRAS_NAME-',key='EXTRAS_NAME_home',size=(15,3),relief='ridge',enable_events=True)],
[sg.Text('Ingredients:',font=('Helvetica', 15))],
[sg.Listbox(['-DRINK_COMPONENTS-'],size=(25,4),key='DrinkIngredients_home')]
]
#Image translation
image = Path('Image_Library/placeholder.png')
image_layout_home = [
[sg.Image(filename=image,key='image_home',size=(128,256))]
]
layout_home = [
[sg.Text(text='Barchine',size=(8,1),font=('Helvetica', 30),key='title_home')],
[sg.Button('Home',font=('Helvetica', 15),key='Home_home',border_width=5,button_color=(None,'#60b551')),
sg.Button('Library',font=('Helvetica', 15),key='Library_home'),
sg.Button('Ingredients',font=('Helvetica', 15),key='Ingredients_home'),
sg.Button('Stations',font=('Helvetica', 15),key='Stations_home'),
sg.Button('Stats',font=('Helvetica', 15),key='Stats_home'),
sg.Button('Settings',font=('Helvetica', 15),key='Settings_home')],
[sg.Listbox(Bartender.showDrinkMenu(True),font=('Helvetica', 20),size=(22,8),
key='Menu_List',enable_events=True),sg.Column(drinkInfo_home),sg.Column(image_layout_home)],
[sg.Button('Order',font=('Helvetica', 20),size=(12,1),key='order_home')
,sg.Button('Custom',font=('Helvetica', 20),size=(8,1),key='custom_home')
,sg.Button('Recommended',font=('Helvetica', 20),size=(12,1),key='recommended_home')
,sg.Button('Unfilter',font=('Helvetica', 20),size=(8,1),key='filter_home')]
]
#Launch Window
window_home = sg.Window('Barchine', layout_home, keep_on_top=True,size=(RESOLUTION.get('x'),RESOLUTION.get('y'))).Finalize()
if(FULLSCREEN):
window_home.Maximize()
#Close Previous window
if(prev_window is not None):
prev_window.close()
chosen = None
while True: # Event Loop
event, values = window_home.read()
print(event, values)
#Check for menu selection
if(event == 'Library_home'):
contextSwitcher('Home_home','Library_home',window_home)
if(event == 'Ingredients_home'):
contextSwitcher('Home_home','Ingredients_home',window_home)
if(event == 'Stations_home'):
contextSwitcher('Home_home','Stations_home',window_home)
if(event == 'Stats_home'):
contextSwitcher('Home_home','Stats_home',window_home)
if(event == 'Settings_home'):
contextSwitcher('Home_home','Settings_home',window_home)
#When drink menu item is selected
        if event == 'Menu_List' and len(values['Menu_List']) > 0:
for drink in listDrinks():
if(drink.getName() == values['Menu_List'][0]):
chosen = drink
window_home['DRINK_NAME_home'].update(drink.getName())
window_home['ICE_NAME_home'].update('Ice: '+drink.getIce())
window_home['GLASS_NAME_home'].update('Glass: '+drink.getGlass())
window_home['GARNISH_NAME_home'].update('Garnish: '+drink.getGarnish())
window_home['EXTRAS_NAME_home'].update('Extras: '+drink.getExtras())
image = Path('Image_Library/'+drink.getImage())
window_home['image_home'].update(filename=image)
#Retrieve list of ingredients formatted
display = []
for key, value in drink.getIngredients().items():
if(key in shelf):
display.append(str(value)+' '*(4-len(str(value)))+'mL - '+str(key))
else:
display.append(str(value)+' '*(4-len(str(value)))+'mL - '+str(key)+' [Miss]')
window_home['DrinkIngredients_home'].update(display)
        if(event == 'order_home' and chosen is not None):
            if(filtered):
                Bartender.createOrder(chosen.getName(),False)
            else:
                #Collect the ingredients missing from the shelf for this drink
                display = []
                counter = 0
                for key, value in chosen.getIngredients().items():
                    if(key not in shelf):
                        display.append(str(key))
                        counter+=1
                if(counter!=0):
                    #Warn before forcing an order with missing ingredients
                    if(ForceWarning(display,window_home)):
                        Bartender.createOrder(chosen.getName(),True)
                else:
                    #Nothing missing, order normally
                    Bartender.createOrder(chosen.getName(),False)
if(event == 'custom_home'):
CustomView(window_home)
if(event == 'recommended_home'):
pass
if(event == 'filter_home'):
#If currently filtered, unfilter
if(filtered):
#Update variables/Button text
filtered = False
window_home['filter_home'].update(text='Filter')
#Format list of drink names
drinks_pretty = []
for drink in listDrinks():
drinks_pretty.append(drink.getName())
#Sort alphabetically
drinks_pretty.sort(key=str.lower)
window_home['Menu_List'].update(values=drinks_pretty)
#If not filtered, make filtered
else:
#Update variables/Button
filtered = True
window_home['filter_home'].update(text='Unfilter')
window_home['Menu_List'].update(values=Bartender.showDrinkMenu(True))
        if(event == 'GARNISH_NAME_home' and chosen is not None):
            TextViewExpanded(chosen.getGarnish(),'Garnish',window_home)
        if(event == 'EXTRAS_NAME_home' and chosen is not None):
            TextViewExpanded(chosen.getExtras(),'Extras',window_home)
if event in (None, 'Exit'):
window_home.close()
break
def ForceWarning(missing,window):
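    """Pop-up listing missing ingredients; returns True only if the user confirms the order anyway."""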
#Temporarily change theme
sg.theme('Dark')
#Temporarily disable host window
window.Disable()
layout_forcewarning = [
[sg.Text('Recipe Warning',key='title_forcewarning',font=('Helvetica', 20))],
[sg.Text('Missing Ingredients:',key='subtitle_forcewarning',font=('Helvetica', 15))],
[sg.Text(size=(12,5),key='missing_forcewarning')],
[sg.Button('Order',font=('Helvetica', 15),key='order_forcewarning'),sg.Button('Cancel',font=('Helvetica', 15),key='cancel_forcewarning')]
]
#Launch window
window_forcewarning = sg.Window('Barchine', layout_forcewarning,keep_on_top=True,no_titlebar=True).Finalize()
window_forcewarning.BringToFront()
#Load missing ingredient values
ingredients = ''
for element in missing:
ingredients+=element+'\n'
window_forcewarning['missing_forcewarning'].update(value=ingredients)
while True: # Event Loop
event, values = window_forcewarning.read()
print(event, values)
if(event == 'order_forcewarning'):
#Re-enable host window
window.Enable()
window.BringToFront()
#Change theme back to normal
sg.theme('DarkAmber')
window_forcewarning.close()
return True
if(event == 'cancel_forcewarning'):
#Re-enable host window
window.Enable()
window.BringToFront()
#Change theme back to normal
sg.theme('DarkAmber')
window_forcewarning.close()
return False
if event in (None, 'Exit'):
break
#Re-enable host window
window.Enable()
window.BringToFront()
#Change theme back to normal
sg.theme('DarkAmber')
window_forcewarning.close()
def CustomView(window):
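    """Pop-up for building a one-off custom drink from ingredients currently on the shelf."""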
#Disable host window temporarily
window.Disable()
layout_buttons_customview = [
[sg.Button('Add',font=('Helvetica', 15),key='add_customview')],
[sg.Button('Remove',font=('Helvetica', 15),key='remove_customview')]
]
layout_customview = [
[sg.Text('Custom Drink',key='title_customview',font=('Helvetica', 30))],
[sg.Listbox([],size=(20,4),key='DrinkIngredients_customview',enable_events=True)
,sg.Column(layout_buttons_customview)],
[sg.Button('Order',font=('Helvetica', 15),key='order_customview'),sg.Button('Cancel',font=('Helvetica', 15),key='cancel_customview')],
]
#Launch window
window_customview = sg.Window('Barchine', layout_customview,keep_on_top=True,no_titlebar=True).Finalize()
window_customview.BringToFront()
ingredients = {}
while True: # Event Loop
event, values = window_customview.read()
print(event, values)
if(event == 'add_customview'):
new_elements = IngredientAddPopUp('custom',None,None,window_customview)
if(new_elements[0] is not None):
ingredients[new_elements[0]] = int(new_elements[1])
#Update ingredients list
display = []
for key, value in ingredients.items():
display.append(str(key)+str(value).rjust(20-len(str(key)), ' '))
window_customview['DrinkIngredients_customview'].update(values=display)
if(event == 'remove_customview' and len(values['DrinkIngredients_customview']) > 0):
for key, value in ingredients.items():
if(key == re.findall("[^0-9]*",values['DrinkIngredients_customview'][0])[0].rstrip()):
#Delete from ingredients list
del ingredients[key]
#Update ingredients list
display = []
for key, value in ingredients.items():
display.append(str(key)+str(value).rjust(20-len(str(key)), ' '))
window_customview['DrinkIngredients_customview'].update(values=display)
break
if(event == 'order_customview'):
#TODO: Order the beverage
pass
if(event == 'cancel_customview'):
break
if event in (None, 'Exit'):
break
#Re-enable host window
window.Enable()
window.BringToFront()
window_customview.close()
def LibraryGUI(prev_window):
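    """Drink library screen: browse stored recipes and add, edit, or delete them."""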
#Format list of drink names
drinks_pretty = []
for drink in listDrinks():
drinks_pretty.append(drink.getName())
#Sort alphabetically
drinks_pretty.sort(key=str.lower)
shelf = {}
#Get a dict of shelf names
for element in Bartender.getShelf():
if(element is not None):
shelf[element.getBase()] = ''
drinkInfo_library = [
[sg.Text('-DRINK_NAME-',key='DRINK_NAME_library',font=('Helvetica', 15),size=(15,2))],
[sg.Text('-ICE_NAME-',key='ICE_NAME_library',size=(15,1))],
[sg.Text('-GLASS_NAME-',key='GLASS_NAME_library',size=(18,1))],
[sg.Text('-GARNISH_NAME-',key='GARNISH_NAME_library',size=(15,1),relief='ridge',enable_events=True)],
[sg.Text('-EXTRAS_NAME-',key='EXTRAS_NAME_library',size=(15,3),relief='ridge',enable_events=True)],
[sg.Text('Ingredients:',font=('Helvetica', 15))],
[sg.Listbox(['-DRINK_COMPONENTS-'],size=(25,4),key='DrinkIngredients_library')]
]
#Image translation
image = Path('Image_Library/placeholder.png')
image_layout_library = [
[sg.Image(filename=image,key='image_library',size=(128,256))]
]
layout_library = [
[sg.Text(text='Barchine',size=(8,1),font=('Helvetica', 30),key='title_library')],
[sg.Button('Home',font=('Helvetica', 15),key='Home_library'),
sg.Button('Library',font=('Helvetica', 15),key='Library_library',border_width=5,button_color=(None,'#60b551')),
sg.Button('Ingredients',font=('Helvetica', 15),key='Ingredients_library'),
sg.Button('Stations',font=('Helvetica', 15),key='Stations_library'),
sg.Button('Stats',font=('Helvetica', 15),key='Stats_library'),
sg.Button('Settings',font=('Helvetica', 15),key='Settings_library')],
[sg.Listbox(drinks_pretty,font=('Helvetica', 20),size=(22,8),
key='Library_List',enable_events=True),sg.Column(drinkInfo_library),sg.Column(image_layout_library)],
[sg.Button('Add',font=('Helvetica', 15),size=(15,1),key='Add_library'),
sg.Button('Edit',font=('Helvetica', 15),size=(15,1),key='Edit_library'),
sg.Button('Delete',font=('Helvetica', 15),size=(15,1),key='Delete_library')]
]
#Launch window
window_library = sg.Window('Barchine', layout_library, keep_on_top=True,size=(RESOLUTION.get('x'),RESOLUTION.get('y'))).Finalize()
if(FULLSCREEN):
window_library.Maximize()
#Close Previous window
if(prev_window is not None):
prev_window.close()
chosen = None
while True: # Event Loop
event, values = window_library.read()
print(event, values)
#Check for menu selection
if(event == 'Home_library'):
contextSwitcher('Library_library','Home_library',window_library)
if(event == 'Ingredients_library'):
contextSwitcher('Library_library','Ingredients_library',window_library)
if(event == 'Stations_library'):
contextSwitcher('Library_library','Stations_library',window_library)
if(event == 'Stats_library'):
contextSwitcher('Library_library','Stats_library',window_library)
if(event == 'Settings_library'):
contextSwitcher('Library_library','Settings_library',window_library)
#When drink item is selected
        if event == 'Library_List' and len(values['Library_List']) > 0:
for drink in listDrinks():
if(drink.getName() == values['Library_List'][0]):
chosen = drink
window_library['DRINK_NAME_library'].update(drink.getName())
window_library['ICE_NAME_library'].update('Ice: '+drink.getIce())
window_library['GLASS_NAME_library'].update('Glass: '+drink.getGlass())
window_library['GARNISH_NAME_library'].update('Garnish: '+drink.getGarnish())
window_library['EXTRAS_NAME_library'].update('Extras: '+drink.getExtras())
image = Path('Image_Library/'+drink.getImage())
window_library['image_library'].update(filename=image)
#Retrieve list of ingredients formatted
display = []
for key, value in drink.getIngredients().items():
if(key in shelf):
display.append(str(value)+' '*(4-len(str(value)))+'mL - '+str(key))
else:
display.append(str(value)+' '*(4-len(str(value)))+'mL - '+str(key)+' [Miss]')
window_library['DrinkIngredients_library'].update(display)
if(event == 'Add_library'):
print(chosen)
DrinkView('new',None,window_library)
chosen = None
#Update list of drinks
drinks_pretty = []
for drink in listDrinks():
drinks_pretty.append(drink.getName())
#Sort alphabetically
drinks_pretty.sort(key=str.lower)
window_library['Library_List'].update(values=drinks_pretty)
pass
if(event == 'Edit_library' and chosen is not None):
DrinkView('edit',chosen,window_library)
chosen = None
#Update list of drinks
drinks_pretty = []
for drink in listDrinks():
drinks_pretty.append(drink.getName())
#Sort alphabetically
drinks_pretty.sort(key=str.lower)
window_library['Library_List'].update(values=drinks_pretty)
pass
if(event == 'Delete_library' and chosen is not None):
print(chosen)
deleteDrink(chosen.getName())
chosen = None
#Update list of drinks
drinks_pretty = []
for drink in listDrinks():
drinks_pretty.append(drink.getName())
#Sort alphabetically
drinks_pretty.sort(key=str.lower)
window_library['Library_List'].update(values=drinks_pretty)
pass
if event in (None, 'Exit'):
window_library.close()
break
        if(event == 'GARNISH_NAME_library' and chosen is not None):
            TextViewExpanded(chosen.getGarnish(),'Garnish',window_library)
        if(event == 'EXTRAS_NAME_library' and chosen is not None):
            TextViewExpanded(chosen.getExtras(),'Extras',window_library)
#Close remaining window
window_library.close()
def IngredientAddPopUp(mode, input_key, input_value, window):
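    """Pop-up for picking an ingredient and amount; returns [name, amount] or [None, None] if cancelled."""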
#Temporarily disable host window
window.Disable()
response = None
layout_ingredientaddpopup = [
[sg.Text('MODE',key='mode_name_ingredientaddpopup',font=('Helvetica', 30))],
[sg.Text('Name: ',key='name_text_ingredientaddpopup',font=('Helvetica', 15))
,sg.OptionMenu(getBaseTypes(),key='ingredient_input_ingredientaddpopup',size=(15,10))],
[sg.Text('Amount: ',key='amount_text_ingredientaddpopup',font=('Helvetica', 15))
,sg.Button('',key='amount_input_ingredientaddpopup',size=(4,1))
,sg.Text(' mL',key='unit_ingredientaddpopup',font=('Helvetica', 15))],
[sg.Button('Save',font=('Helvetica', 15),key='save_ingredientaddpopup')
,sg.Button('Exit',font=('Helvetica', 15),key='exit_ingredientaddpopup')],
]
#Launch window
window_ingredientaddpopup = sg.Window('Barchine', layout_ingredientaddpopup,keep_on_top=True,no_titlebar=True).Finalize()
window_ingredientaddpopup.BringToFront()
#Change mode title displayed
if(mode == 'edit'):
window_ingredientaddpopup['mode_name_ingredientaddpopup'].update(value='Edit')
window_ingredientaddpopup['ingredient_input_ingredientaddpopup'].update(value=input_key)
window_ingredientaddpopup['amount_input_ingredientaddpopup'].update(text=input_value)
if(mode == 'new' or mode == 'custom'):
window_ingredientaddpopup['mode_name_ingredientaddpopup'].update(value='New')
#Change displayed options depending on mode
if(mode == 'custom'):
basetypes = set()
for element in Bartender.getShelf():
if(element is not None):
basetypes.add(element.getBase())
window_ingredientaddpopup['ingredient_input_ingredientaddpopup'].update(values=list(basetypes))
while True: # Event Loop
event, values = window_ingredientaddpopup.read()
print(event, values)
if(event == 'amount_input_ingredientaddpopup'):
window_ingredientaddpopup.Disable()
window_ingredientaddpopup['amount_input_ingredientaddpopup'].update(text=Keypad())
window_ingredientaddpopup.Enable()
if(event =='save_ingredientaddpopup'):
if(window_ingredientaddpopup['amount_input_ingredientaddpopup'].GetText()):
response = 'save'
break
else:
print('ERROR: invalid number')
if(event =='exit_ingredientaddpopup'):
response = 'exit'
break
if event in (None, 'Exit'):
break
#Re-enable host window
window.Enable()
window.BringToFront()
window_ingredientaddpopup.close()
if(response == 'save'):
return([values['ingredient_input_ingredientaddpopup'],window_ingredientaddpopup['amount_input_ingredientaddpopup'].GetText()])
    else:
        #Treat a closed window the same as 'exit' so callers always receive a two-element list
        return([None,None])
def TextViewExpanded(text,title,window):
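    """Pop-up that shows a longer text field (garnish or extras) in full."""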
#Temporarily disable host window
window.Disable()
layout_textviewexpanded = [
[sg.Text(text=title,font=('Helvetica', 20),key='title_textviewexpanded')],
[sg.Text(text=text,font=('Helvetica', 12),key='content_textviewexpanded',size=(25,6))],
[sg.Button('Close',font=('Helvetica', 10),key='close_textviewexpanded')]
]
#Launch window
window_textviewexpanded = sg.Window('Barchine', layout_textviewexpanded,keep_on_top=True,no_titlebar=True).Finalize()
window_textviewexpanded.BringToFront()
while True: # Event Loop
event, values = window_textviewexpanded.read()
print(event, values)
if(event == 'close_textviewexpanded'):
window_textviewexpanded.close()
break
if event in (None, 'Exit'):
break
#Re-enable host window
window.Enable()
window.BringToFront()
window_textviewexpanded.close()
def DrinkView(mode,drink,window):
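    """Pop-up for creating a new drink ('new' mode) or editing an existing one ('edit' mode)."""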
#Temporarily disable host window
window.Disable()
layout_buttons_drinkview = [
[sg.Button('Add',font=('Helvetica', 15),key='add_drinkviewingredient')],
[sg.Button('Edit',font=('Helvetica', 15),key='edit_drinkviewingredient')],
[sg.Button('Remove',font=('Helvetica', 15),key='remove_drinkviewingredient')]
]
layout_drinkview = [
[sg.Text('MODE',key='mode_name_drinkview',font=('Helvetica', 30))],
[sg.Text('Name: ',key='name_text_drinkview',font=('Helvetica', 15)),sg.InputText('DEFAULT NAME',key='name_input_drinkview')],
[sg.Text('Ice: ',key='ice_text_drinkview',font=('Helvetica', 15)),sg.OptionMenu(getIceTypes(),key='ice_input_drinkview')],
[sg.Text('Glass: ',key='glass_text_drinkview',font=('Helvetica', 15)),sg.OptionMenu(getGlassTypes(),key='glass_input_drinkview')],
[sg.Text('Garnish: ',key='garnish_text_drinkview',font=('Helvetica', 15)),sg.InputText('None',key='garnish_input_drinkview')],
[sg.Text('Extras: ',key='extras_text_drinkview',font=('Helvetica', 15)),sg.InputText('None',key='extra_input_drinkview')],
[sg.Input(key='filename_field', visible=False, enable_events=True),sg.FileBrowse(file_types=(('Images', '*.png'),))
,sg.Text('Image: ',key='image_text_drinkview',font=('Helvetica', 15))
,sg.Text('placeholder.png',key='filename_drinkview',font=('Helvetica', 12),size=(20,1))],
[sg.Text('Ingredients',key='ingredients_title',font=('Helvetica', 20)),sg.Text(' ',key='spacer_drinkview',size=(20,1))
,sg.Button('Save',font=('Helvetica', 15),key='save_drinkview'),sg.Button('Exit',font=('Helvetica', 15),key='exit_drinkview')],
#TODO:List drink components here
[sg.Listbox([],size=(20,4),key='DrinkIngredients_drinkview',enable_events=True),
sg.Column(layout_buttons_drinkview)
]
]
#Launch window
window_drinkview = sg.Window('Barchine', layout_drinkview,keep_on_top=True,no_titlebar=True).Finalize()
window_drinkview.BringToFront()
#Set default variable values
new_name = None
new_ice = None
new_glass = None
new_garnish = None
new_extras = None
new_ingredients = {}
new_image = None
#Change mode title displayed
if(mode == 'edit'):
window_drinkview['mode_name_drinkview'].update(value='Edit')
if(mode == 'new'):
window_drinkview['mode_name_drinkview'].update(value='New')
#Change displayed info based on mode
if(mode == 'edit'):
#Retrieve proper drink reference
#Set default variables
new_name = drink.getName()
new_ice = drink.getIce()
new_glass = drink.getGlass()
new_garnish = drink.getGarnish()
new_extras = drink.getExtras()
        #Copy the dict so abandoned edits don't mutate the stored drink
        new_ingredients = dict(drink.getIngredients())
new_image = drink.getImage()
#Retrieve list of ingredients formatted
display = []
for key, value in new_ingredients.items():
display.append(str(value)+' '*(4-len(str(value)))+'mL - '+str(key))
#Update fields
window_drinkview['name_input_drinkview'].update(value=new_name)
window_drinkview['ice_input_drinkview'].update(value=new_ice)
window_drinkview['glass_input_drinkview'].update(value=new_glass)
window_drinkview['garnish_input_drinkview'].update(value=new_garnish)
window_drinkview['extra_input_drinkview'].update(value=new_extras)
window_drinkview['DrinkIngredients_drinkview'].update(values=display)
window_drinkview['filename_drinkview'].update(value=new_image)
window_drinkview['filename_field'].update(value=new_image)
while True: # Event Loop
event, values = window_drinkview.read()
print(event, values)
if(event == 'filename_field'):
print('IMAGE FOUND')
window_drinkview['filename_drinkview'].update(value=re.search('([^\/]*)$', values['filename_field']).group())
if(event =='save_drinkview'):
new_name = values['name_input_drinkview']
            if(mode == 'new' and new_name != '' and len(new_ingredients) > 0):
#Load in values
new_ice = values['ice_input_drinkview']
new_glass = values['glass_input_drinkview']
new_garnish = values['garnish_input_drinkview']
new_extras = values['extra_input_drinkview']
if(values['filename_field'][-3:] == 'png'):
new_image = re.search('([^\/]*)$', values['filename_field']).group()
else:
new_image = 'placeholder.png'
check = True
#Check for duplicate name
for drink_element in listDrinks():
if(drink_element.getName() == new_name):
check = False
#Continue saving
if(check):
createDrink(new_name,new_ice,new_glass,new_garnish,new_extras,new_ingredients,new_image,False)
break
else:
print('ERROR: Duplicate name or invalid image file')
pass
if(mode == 'edit'):
#Get changes
new_name = values['name_input_drinkview']
new_ice = values['ice_input_drinkview']
new_glass = values['glass_input_drinkview']
new_garnish = values['garnish_input_drinkview']
new_extras = values['extra_input_drinkview']
if(values['filename_field'][-3:] == 'png'):
new_image = re.search('([^\/]*)$', values['filename_field']).group()
else:
new_image = 'placeholder.png'
check = True
#Check for duplicate name
for drink_element in listDrinks():
if(drink_element.getName() == new_name and new_name != drink.getName()):
check = False
#Continue saving
if(check):
#Apply edits
drink.setName(new_name)
drink.setIce(new_ice)
drink.setGlass(new_glass)
drink.setGarnish(new_garnish)
drink.setExtras(new_extras)
drink.setIngredients(new_ingredients)
drink.setImage(new_image)
listDrinks()
else:
print('ERROR: Duplicate name or invalid image file')
break
if(event =='exit_drinkview'):
break
if(event == 'add_drinkviewingredient'):
new_elements = IngredientAddPopUp('new',None,None,window_drinkview)
if(new_elements[0] is not None):
new_ingredients[new_elements[0]] = int(new_elements[1])
#Update ingredients list
display = []
for key, value in new_ingredients.items():
display.append(str(value)+' '*(4-len(str(value)))+'mL - '+str(key))
window_drinkview['DrinkIngredients_drinkview'].update(values=display)
if(event == 'edit_drinkviewingredient' and mode == 'edit' and len(values['DrinkIngredients_drinkview']) > 0):
for key, value in new_ingredients.items():
if(key == values['DrinkIngredients_drinkview'][0][values['DrinkIngredients_drinkview'][0].index('- ')+2:]):
                    #Send values to user field, then replace with the returned values
new_elements = IngredientAddPopUp('edit',key,value,window_drinkview)
#Replace entry
if(new_elements[0] is not None):
del new_ingredients[key]
new_ingredients[new_elements[0]] = int(new_elements[1])
#Update ingredients list
display = []
for key, value in new_ingredients.items():
display.append(str(value)+' '*(4-len(str(value)))+'mL - '+str(key))
window_drinkview['DrinkIngredients_drinkview'].update(values=display)
if(event == 'remove_drinkviewingredient' and len(values['DrinkIngredients_drinkview']) > 0):
for key, value in new_ingredients.items():
if(key == values['DrinkIngredients_drinkview'][0][values['DrinkIngredients_drinkview'][0].index('- ')+2:]):
#Delete from ingredients list
del new_ingredients[key]
#Update ingredients list
display = []
for key, value in new_ingredients.items():
display.append(str(value)+' '*(4-len(str(value)))+'mL - '+str(key))
window_drinkview['DrinkIngredients_drinkview'].update(values=display)
break
if event in (None, 'Exit'):
break
#Re-enable host window
window.Enable()
window.BringToFront()
window_drinkview.close()
def IngredientsGUI(prev_window):
#Format list of ingredient names
ingredients_pretty = []
for ingredient in listIngredients():
ingredients_pretty.append(ingredient.getName())
ingredientInfo_ingredients = [
[sg.Text('-INGREDIENT_NAME-',key='INGREDIENT_NAME_ingredients',font=('Helvetica', 15),size=(30,1))],
[sg.Text('-FAMILY_NAME-',key='FAMILY_NAME_ingredients',size=(15,1))],
[sg.Text('-BASE_NAME-',key='BASE_NAME_ingredients',size=(15,1))],
[sg.Text('-STARTING_VOLUME-',key='STARTING_VOLUME_NAME_ingredients',size=(24,1))],
[sg.Text('-CURRENT_VOLUME-',key='CURRENT_VOLUME_NAME_ingredients',size=(24,1))]
]
layout_ingredients = [
[sg.Text(text='Barchine',size=(8,1),font=('Helvetica', 30),key='title_ingredients')],
[sg.Button('Home',font=('Helvetica', 15),key='Home_ingredients'),
sg.Button('Library',font=('Helvetica', 15),key='Library_ingredients'),
sg.Button('Ingredients',font=('Helvetica', 15),key='Ingredients_ingredients',border_width=5,button_color=(None,'#60b551')),
sg.Button('Stations',font=('Helvetica', 15),key='Stations_ingredients'),
sg.Button('Stats',font=('Helvetica', 15),key='Stats_ingredients'),
sg.Button('Settings',font=('Helvetica', 15),key='Settings_ingredients')],
[sg.Listbox(ingredients_pretty,font=('Helvetica', 20),size=(25,8),
key='Ingredients_List',enable_events=True),sg.Column(ingredientInfo_ingredients)],
[sg.Button('Add',font=('Helvetica', 15),size=(15,1),key='Add_ingredients'),
sg.Button('Edit',font=('Helvetica', 15),size=(15,1),key='Edit_ingredients'),
sg.Button('Delete',font=('Helvetica', 15),size=(15,1),key='Delete_ingredients')]
]
#Launch window
window_ingredients = sg.Window('Barchine', layout_ingredients, keep_on_top=True,size=(RESOLUTION.get('x'),RESOLUTION.get('y'))).Finalize()
if(FULLSCREEN):
window_ingredients.Maximize()
#Close Previous window
if(prev_window is not None):
prev_window.close()
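    #Track the ingredient currently selected in the listbox (None until one is picked)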
chosen = None
while True: # Event Loop
event, values = window_ingredients.read()
print(event, values)
#Check for menu selection
if(event == 'Home_ingredients'):
contextSwitcher('Ingredients_ingredients','Home_ingredients',window_ingredients)
if(event == 'Library_ingredients'):
contextSwitcher('Ingredients_ingredients','Library_ingredients',window_ingredients)
if(event == 'Stations_ingredients'):
contextSwitcher('Ingredients_ingredients','Stations_ingredients',window_ingredients)
if(event == 'Stats_ingredients'):
contextSwitcher('Ingredients_ingredients','Stats_ingredients',window_ingredients)
if(event == 'Settings_ingredients'):
contextSwitcher('Ingredients_ingredients','Settings_ingredients',window_ingredients)
#When ingredient item is selected
if event == 'Ingredients_List':
for ingredient in listIngredients():
if(ingredient.getName() == values['Ingredients_List'][0]):
chosen = ingredient
window_ingredients['INGREDIENT_NAME_ingredients'].update(ingredient.getName())
window_ingredients['FAMILY_NAME_ingredients'].update('Family: '+ingredient.getFamily())
window_ingredients['BASE_NAME_ingredients'].update('Base: '+ingredient.getBase())
window_ingredients['STARTING_VOLUME_NAME_ingredients'].update('Starting Volume: '+str(ingredient.getStartVol())+' mL')
window_ingredients['CURRENT_VOLUME_NAME_ingredients'].update('Current Volume: '+str(ingredient.getEndVol())+' mL')
if(event == 'Add_ingredients'):
IngredientView('new',None,window_ingredients)
#Update list of ingredients
ingredients_pretty = []
for ingredient in listIngredients():
ingredients_pretty.append(ingredient.getName())
window_ingredients['Ingredients_List'].update(values=ingredients_pretty)
pass
if(event == 'Edit_ingredients' and chosen is not None):
IngredientView('edit',chosen,window_ingredients)
#Update list of ingredients
ingredients_pretty = []
for ingredient in listIngredients():
ingredients_pretty.append(ingredient.getName())
window_ingredients['Ingredients_List'].update(values=ingredients_pretty)
pass
if(event == 'Delete_ingredients' and chosen is not None):
deleteIngredient(chosen.getName())
chosen = None
#Update list of ingredients
ingredients_pretty = []
for ingredient in listIngredients():
ingredients_pretty.append(ingredient.getName())
window_ingredients['Ingredients_List'].update(values=ingredients_pretty)
pass
if event in (None, 'Exit'):
window_ingredients.close()
break
#Close remaining window
window_ingredients.close()
def IngredientView(mode,ingredient,window):
#Temporarily disable host window
window.Disable()
layout_ingredientview = [
[sg.Text('MODE',key='mode_name_ingredientview',font=('Helvetica', 30))],
[sg.Text('Name: ',key='name_text_ingredientview',font=('Helvetica', 15)),sg.InputText('DEFAULT NAME',key='name_input_ingredientview')],
[sg.Text('Base: ',key='base_text_ingredientview',font=('Helvetica', 15))
,sg.OptionMenu(getBaseTypes(),key='base_input_ingredientview',size=(15,10))],
[sg.Text('Family: ',key='family_text_ingredientview',font=('Helvetica', 15))
,sg.OptionMenu(getFamilyTypes(),key='family_input_ingredientview')],
[sg.Text('Starting Volume: ',key='startvol_text_ingredientview',font=('Helvetica', 15))
,sg.Button('',key='startvol_input_ingredientview',size=(4,1))
,sg.Text(' mL',key='unit1_ingredientview',font=('Helvetica', 15))],
[sg.Text('Current Volume: ',key='endvol_text_ingredientview',font=('Helvetica', 15))
,sg.Button('',key='endvol_input_ingredientview',size=(4,1))
,sg.Text(' mL',key='unit2_ingredientview',font=('Helvetica', 15))],
[sg.Button('Save',font=('Helvetica', 15),key='save_ingredientview'),sg.Button('Exit',font=('Helvetica', 15),key='exit_ingredientview')]
]
#Launch window
window_ingredientview = sg.Window('Barchine', layout_ingredientview,keep_on_top=True,no_titlebar=True).Finalize()
window_ingredientview.BringToFront()
#Initialize default variables
new_name = None
new_base = None
new_family = None
new_startVol = None
new_endVol = None
new_active = False
new_position = -1
#Change mode title displayed
if(mode == 'edit'):
window_ingredientview['mode_name_ingredientview'].update(value='Edit')
if(mode == 'new'):
window_ingredientview['mode_name_ingredientview'].update(value='New')
#Change displayed info based on mode
if(mode == 'edit'):
#Set default variables
new_name = ingredient.getName()
new_base = ingredient.getBase()
new_family = ingredient.getFamily()
new_startVol = ingredient.getStartVol()
new_endVol = ingredient.getEndVol()
new_active = ingredient.isActive()
new_position = ingredient.getPosition()
#Update fields
window_ingredientview['name_input_ingredientview'].update(value=new_name)
window_ingredientview['base_input_ingredientview'].update(value=new_base)
window_ingredientview['family_input_ingredientview'].update(value=new_family)
window_ingredientview['startvol_input_ingredientview'].update(text=new_startVol)
window_ingredientview['endvol_input_ingredientview'].update(text=new_endVol)
while True: # Event Loop
event, values = window_ingredientview.read()
print(event, values)
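        #The volume buttons open the on-screen Keypad and display whatever value it returns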
if(event == 'startvol_input_ingredientview'):
window_ingredientview.Disable()
window_ingredientview['startvol_input_ingredientview'].update(text=Keypad())
window_ingredientview.Enable()
if(event == 'endvol_input_ingredientview'):
window_ingredientview.Disable()
window_ingredientview['endvol_input_ingredientview'].update(text=Keypad())
window_ingredientview.Enable()
if(event == 'save_ingredientview'):
new_name = re.sub('[#@,]','', values['name_input_ingredientview'])
if(mode == 'new' and len(new_name) > 0 and new_name is not None):
#Load in values
new_base = values['base_input_ingredientview']
new_family = values['family_input_ingredientview']
new_startVol = window_ingredientview['startvol_input_ingredientview'].GetText()
new_endVol = window_ingredientview['endvol_input_ingredientview'].GetText()
check = True
#Check for duplicate name
for element in listIngredients():
if(new_name == element.getName()):
check = False
#Ensure volumes are correct
if(new_startVol == '' or new_endVol == ''):
check = False
elif(int(new_startVol) < int(new_endVol)):
check = False
if(check):
print('SAVED')
createIngredient(new_name,new_base,new_family,new_startVol,new_endVol,new_active,new_position,False)
break
pass
if(mode == 'edit'):
#Load in values
new_base = values['base_input_ingredientview']
new_family = values['family_input_ingredientview']
new_startVol = window_ingredientview['startvol_input_ingredientview'].GetText()
new_endVol = window_ingredientview['endvol_input_ingredientview'].GetText()
check = True
#Check for duplicate name
for element in listIngredients():
if(element.getName() == new_name and new_name != ingredient.getName()):
check = False
#Ensure volumes are correct
if(int(new_startVol) < int(new_endVol)):
check = False
if(check):
#Load in values
ingredient.setName(new_name)
ingredient.setBase(new_base)
ingredient.setFamily(new_family)
ingredient.setStartVol(new_startVol)
ingredient.setEndVol(new_endVol)
break
if(event == 'exit_ingredientview'):
break
if event in (None, 'Exit'):
break
#Re-enable host window
window.Enable()
window.BringToFront()
window_ingredientview.close()
def StationsGUI(prev_window):
    #Path to the measurement bar (level indicator) image
measurebar = Path('Image_Library/measurementbar.png')
#Layout for level indicator image
layout_measure = [
[sg.Text(text='100%',size=(5,1),font=('Helvetica', 8))],
[sg.Image(filename=measurebar,key='image_library',size=(128,140))],
[sg.Text(text='0%',size=(3,1),font=('Helvetica', 12))],
]
#Layouts for alcohol stations
layout_bar1 = [
[sg.Text(text='1',size=(2,1),font=('Helvetica', 12),key='bar1_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar1_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar1_name',enable_events=True)],
]
layout_bar2 = [
[sg.Text(text='2',size=(1,1),font=('Helvetica', 12),key='bar2_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar2_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar2_name',enable_events=True)],
]
layout_bar3 = [
[sg.Text(text='3',size=(1,1),font=('Helvetica', 12),key='bar3_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar3_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar3_name',enable_events=True)],
]
layout_bar4 = [
[sg.Text(text='4',size=(1,1),font=('Helvetica', 12),key='bar4_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar4_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar4_name',enable_events=True)],
]
layout_bar5 = [
[sg.Text(text='5',size=(1,1),font=('Helvetica', 12),key='bar5_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar5_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar5_name',enable_events=True)],
]
layout_bar6 = [
[sg.Text(text='6',size=(1,1),font=('Helvetica', 12),key='bar6_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar6_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar6_name',enable_events=True)],
]
layout_bar7 = [
[sg.Text(text='7',size=(1,1),font=('Helvetica', 12),key='bar7_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar7_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar7_name',enable_events=True)],
]
layout_bar8 = [
[sg.Text(text='8',size=(1,1),font=('Helvetica', 12),key='bar8_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar8_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar8_name',enable_events=True)],
]
layout_bar9 = [
[sg.Text(text='9',size=(1,1),font=('Helvetica', 12),key='bar9_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar9_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar9_name',enable_events=True)],
]
layout_stations = [
[sg.Text(text='Barchine',size=(8,1),font=('Helvetica', 30),key='title_stations')],
[sg.Button('Home',font=('Helvetica', 15),key='Home_stations'),
sg.Button('Library',font=('Helvetica', 15),key='Library_stations'),
sg.Button('Ingredients',font=('Helvetica', 15),key='Ingredients_stations'),
sg.Button('Stations',font=('Helvetica', 15),key='Stations_stations',border_width=5,button_color=(None,'#60b551')),
sg.Button('Stats',font=('Helvetica', 15),key='Stats_stations'),
sg.Button('Settings',font=('Helvetica', 15),key='Settings_stations')],
[sg.Text(text='Select Station to Edit',size=(30,1),font=('Helvetica', 20),key='subtitle_stations')
,sg.Button('View Mixers',key='station_menu_selector',size=(10,1),font=('Helvetica', 15))],
[sg.Column(layout_measure),sg.Column(layout_bar1,key='bar1_column',visible=True),sg.Column(layout_bar2,key='bar2_column',visible=True),
sg.Column(layout_bar3,key='bar3_column',visible=True),sg.Column(layout_bar4,key='bar4_column',visible=True),
sg.Column(layout_bar5,key='bar5_column',visible=True),sg.Column(layout_bar6,key='bar6_column',visible=True),
sg.Column(layout_bar7,key='bar7_column',visible=True),sg.Column(layout_bar8,key='bar8_column',visible=True),
sg.Column(layout_bar9,key='bar9_column',visible=True)]
]
#Launch window
window_stations = sg.Window('Barchine', layout_stations, keep_on_top=True,size=(RESOLUTION.get('x'),RESOLUTION.get('y'))).Finalize()
if(FULLSCREEN):
window_stations.Maximize()
#Close Previous window
if(prev_window is not None):
prev_window.close()
#Pre-unload and reload all stations to remove visibility offset bug
for i in range(1,Bartender.getAlcCount()+1):
window_stations['bar'+str(i)+'_num'].update(visible=False)
window_stations['bar'+str(i)+'_meter'].update(visible=False)
window_stations['bar'+str(i)+'_name'].update(visible=False)
for i in range(1,Bartender.getAlcCount()+1):
window_stations['bar'+str(i)+'_num'].update(visible=True)
window_stations['bar'+str(i)+'_meter'].update(visible=True)
window_stations['bar'+str(i)+'_name'].update(visible=True)
#Draw the currently loaded stations
startIndex = 0
endIndex = 0
offset = 0
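    #startIndex/endIndex choose which shelf slots to draw; offset maps mixer slots back onto the visible station columns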
#Setup variables for counting alcohol
if(window_stations['station_menu_selector'].GetText() == 'View Mixers'):
startIndex = 0
endIndex = Bartender.getAlcCount()
#Set up variables for counting mixers
if(window_stations['station_menu_selector'].GetText() == 'View Alcohol'):
startIndex = Bartender.getAlcCount()
endIndex = Bartender.getMaxPos()
offset = Bartender.getAlcCount()
for i in range(startIndex,endIndex):
        if(Bartender.getShelf()[i] is not None):
item = Bartender.getShelf()[i]
window_stations['bar'+str(i+1-offset)+'_name'].update(value=item.getName())
window_stations['bar'+str(i+1-offset)+'_meter'].update_bar(item.getEndVol(),item.getStartVol())
else:
window_stations['bar'+str(i+1-offset)+'_name'].update(value='EMPTY')
window_stations['bar'+str(i+1-offset)+'_meter'].update_bar(0,100)
#Increase offset if counting mixers
if(startIndex > Bartender.getAlcCount()):
offset+=1
#Hide/Show leftover stations if unused (mixers)
if(window_stations['station_menu_selector'].GetText() == 'View Alcohol'):
for i in range(Bartender.getMixCount()+1,Bartender.getAlcCount()+1):
window_stations['bar'+str(i)+'_meter'].update(visible=False)
#Reveal hidden stations for alcohol
else:
for i in range(Bartender.getMixCount()+1,Bartender.getAlcCount()+1):
window_stations['bar'+str(i)+'_meter'].update(visible=True)
chosen = None
update = False
while True: # Event Loop
event, values = window_stations.read()
print(event, values)
#Check for menu selection
if(event == 'Home_stations'):
contextSwitcher('Stations_stations','Home_stations',window_stations)
if(event == 'Library_stations'):
contextSwitcher('Stations_stations','Library_stations',window_stations)
if(event == 'Ingredients_stations'):
contextSwitcher('Stations_stations','Ingredients_stations',window_stations)
if(event == 'Stats_stations'):
contextSwitcher('Stations_stations','Stats_stations',window_stations)
if(event == 'Settings_stations'):
contextSwitcher('Stations_stations','Settings_stations',window_stations)
#Check for station menu selector
if(event == 'station_menu_selector'):
#If currently looking at alcohol stations, swap to mixers
if(window_stations['station_menu_selector'].GetText() == 'View Mixers'):
window_stations['station_menu_selector'].update(text='View Alcohol')
else:
window_stations['station_menu_selector'].update(text='View Mixers')
update = True
#Search for the selected station
offset = Bartender.getAlcCount()
for i in range(1,Bartender.getMaxPos()):
#Check for currently active station menu
if(window_stations['station_menu_selector'].GetText() == 'View Mixers' and i < Bartender.getAlcCount()+1):
if(event == 'bar'+str(i)+'_name'):
                if(Bartender.getShelf()[i-1] is None):
StationsView(str(i),None,'Alcohol',window_stations)
else:
StationsView(str(i),Bartender.getShelf()[i-1],'Alcohol',window_stations)
#Update Display
update = True
if(window_stations['station_menu_selector'].GetText() == 'View Alcohol' and i < Bartender.getMixCount()+1):
if(event == 'bar'+str(i)+'_name'):
                if(Bartender.getShelf()[i-1+offset] is None):
StationsView(str(i+offset),None,'Mixer',window_stations)
else:
StationsView(i+offset,Bartender.getShelf()[i-1+offset],'Mixer',window_stations)
#Update Display
update = True
#Update Display
if(update):
#Draw the currently loaded stations
startIndex = 0
endIndex = 0
offset = 0
#Setup variables for counting alcohol
if(window_stations['station_menu_selector'].GetText() == 'View Mixers'):
startIndex = 0
endIndex = Bartender.getAlcCount()
#Set up variables for counting mixers
if(window_stations['station_menu_selector'].GetText() == 'View Alcohol'):
startIndex = Bartender.getAlcCount()
endIndex = Bartender.getMaxPos()
offset = Bartender.getAlcCount()
for i in range(startIndex,endIndex):
            if(Bartender.getShelf()[i] is not None):
item = Bartender.getShelf()[i]
window_stations['bar'+str(i+1-offset)+'_name'].update(value=item.getName())
window_stations['bar'+str(i+1-offset)+'_meter'].update_bar(item.getEndVol(),item.getStartVol())
else:
window_stations['bar'+str(i+1-offset)+'_name'].update(value='EMPTY')
window_stations['bar'+str(i+1-offset)+'_meter'].update_bar(0,100)
#Increase offset if counting mixers
if(startIndex > Bartender.getAlcCount()):
offset+=1
#Hide/Show leftover stations if unused (mixers)
if(window_stations['station_menu_selector'].GetText() == 'View Alcohol'):
for i in range(Bartender.getMixCount()+1,Bartender.getAlcCount()+1):
window_stations['bar'+str(i)+'_num'].update(visible=False)
window_stations['bar'+str(i)+'_meter'].update(visible=False)
window_stations['bar'+str(i)+'_name'].update(visible=False)
#Reveal hidden stations for alcohol
else:
for i in range(Bartender.getMixCount()+1,Bartender.getAlcCount()+1):
window_stations['bar'+str(i)+'_num'].update(visible=True)
window_stations['bar'+str(i)+'_meter'].update(visible=True)
window_stations['bar'+str(i)+'_name'].update(visible=True)
update=False
if event in (None, 'Exit'):
window_stations.close()
break
#Close remaining window
window_stations.close()
def StationsView(station,ingredient,family,window):
#Temporarily disable host window
window.Disable()
available = ['Empty']
for element in listIngredients():
if not element.isActive() and element.getFamily() == family:
available.append(element.getName())
layout_stationsview = [
[sg.Text('Replace Station ',key='title_stationsview',font=('Helvetica', 30)),sg.Text(station,key='title_num_stationsview',font=('Helvetica', 30))],
[sg.Text('New Ingredient: ',key='ingredient_text_stationsview',font=('Helvetica', 15))
,sg.OptionMenu(available,key='ingredient_input_stationsview')],
[sg.Button('Save',font=('Helvetica', 15),key='save_stationsview'),sg.Button('Exit',font=('Helvetica', 15),key='exit_stationsview')]
]
#Launch window
window_stationsview = sg.Window('Barchine', layout_stationsview,keep_on_top=True,no_titlebar=True).Finalize()
window_stationsview.BringToFront()
#Check for preconditions
if(ingredient is not None):
window_stationsview['ingredient_input_stationsview'].update(value=ingredient.getName())
while True: # Event Loop
event, values = window_stationsview.read()
print(event, values)
if(event == 'save_stationsview'):
#Check if field is set to 'Empty'
if(values['ingredient_input_stationsview'] != 'Empty'):
#Get the replacement ingredient and update fields
for element in listIngredients():
if(element.getName() == values['ingredient_input_stationsview']):
element.setActive(True)
element.setPosition(int(station)-1)
#If exists, update old ingredient
if(ingredient is not None):
for element in listIngredients():
if(element.getName() == ingredient.getName()):
element.setActive(False)
element.setPosition(-1)
break
if(event == 'exit_stationsview'):
break
if event in (None, 'Exit'):
break
#Re-enable host window
window.Enable()
window.BringToFront()
window_stationsview.close()
def StatsGUI(prev_window):
layout_stats = [
[sg.Text(text='Barchine',size=(8,1),font=('Helvetica', 30),key='title_stats')],
[sg.Button('Home',font=('Helvetica', 15),key='Home_stats'),
sg.Button('Library',font=('Helvetica', 15),key='Library_stats'),
sg.Button('Ingredients',font=('Helvetica', 15),key='Ingredients_stats'),
sg.Button('Stations',font=('Helvetica', 15),key='Stations_stats'),
sg.Button('Stats',font=('Helvetica', 15),key='Stats_stats',border_width=5,button_color=(None,'#60b551')),
sg.Button('Settings',font=('Helvetica', 15),key='Settings_stats')],
[sg.Text(text='Stats Page',size=(17,1),font=('Helvetica', 20),key='subtitle_stats')]
]
#Launch window
window_stats = sg.Window('Barchine', layout_stats,keep_on_top=True,size=(RESOLUTION.get('x'),RESOLUTION.get('y'))).Finalize()
if(FULLSCREEN):
window_stats.Maximize()
#Close Previous window
if(prev_window is not None):
prev_window.close()
while True: # Event Loop
event, values = window_stats.read()
print(event, values)
#Check for menu selection
if(event == 'Home_stats'):
contextSwitcher('Stats_stats','Home_stats',window_stats)
if(event == 'Library_stats'):
contextSwitcher('Stats_stats','Library_stats',window_stats)
if(event == 'Ingredients_stats'):
contextSwitcher('Stats_stats','Ingredients_stats',window_stats)
if(event == 'Stations_stats'):
contextSwitcher('Stats_stats','Stations_stats',window_stats)
if(event == 'Settings_stats'):
contextSwitcher('Stats_stats','Settings_stats',window_stats)
if event in (None, 'Exit'):
window_stats.close()
break
#Close remaining window
window_stats.close()
def SettingsGUI(prev_window):
layout_settings = [
[sg.Text(text='Barchine',size=(8,1),font=('Helvetica', 30),key='title_settings')],
[sg.Button('Home',font=('Helvetica', 15),key='Home_settings'),
sg.Button('Library',font=('Helvetica', 15),key='Library_settings'),
sg.Button('Ingredients',font=('Helvetica', 15),key='Ingredients_settings'),
sg.Button('Stations',font=('Helvetica', 15),key='Stations_settings'),
sg.Button('Stats',font=('Helvetica', 15),key='Stats_settings'),
sg.Button('Settings',font=('Helvetica', 15),key='Settings_settings',border_width=5,button_color=(None,'#60b551'))],
[sg.Text(text='Settings Page',size=(17,1),font=('Helvetica', 20),key='subtitle_settings')],
[sg.Button('Save',key='save_settings',font=('Helvetica', 20))
,sg.Button('Reload Bases',key='reload_bases_settings',font=('Helvetica', 20))]
]
#Launch window
window_settings = sg.Window('Barchine', layout_settings,keep_on_top=True,size=(RESOLUTION.get('x'),RESOLUTION.get('y'))).Finalize()
if(FULLSCREEN):
window_settings.Maximize()
#Close Previous window
if(prev_window is not None):
prev_window.close()
while True: # Event Loop
event, values = window_settings.read()
print(event, values)
#Check for menu selection
if(event == 'Home_settings'):
contextSwitcher('Settings_settings','Home_settings',window_settings)
if(event == 'Library_settings'):
contextSwitcher('Settings_settings','Library_settings',window_settings)
if(event == 'Ingredients_settings'):
contextSwitcher('Settings_settings','Ingredients_settings',window_settings)
if(event == 'Stations_settings'):
contextSwitcher('Settings_settings','Stations_settings',window_settings)
if(event == 'Stats_settings'):
contextSwitcher('Settings_settings','Stats_settings',window_settings)
if(event == 'save_settings'):
print('Saving libraries')
storeIngredientLibrary()
storeDrinkLibrary()
print('Saved')
if(event == 'reload_bases_settings'):
restoreBases()
if event in (None, 'Exit'):
window_settings.close()
break
#Close remaining window
window_settings.close()
#Launch default home menu
HomeGUI(None) | mit | -2,645,147,178,557,540,400 | 41.298989 | 167 | 0.569505 | false | 3.767156 | false | false | false |
jmuhlich/indra | indra/preassembler/hierarchy_manager.py | 1 | 11321 | from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import rdflib
import logging
try:
from functools import lru_cache
except ImportError:
from functools32 import lru_cache
logger = logging.getLogger('hierarchy_manager')
class HierarchyManager(object):
"""Store hierarchical relationships between different types of entities.
Used to store, e.g., entity hierarchies (proteins and protein families)
and modification hierarchies (serine phosphorylation vs. phosphorylation).
Parameters
----------
rdf_file : string
Path to the RDF file containing the hierarchy.
Attributes
----------
graph : instance of `rdflib.Graph`
The RDF graph containing the hierarchy.
"""
prefixes = """
PREFIX rn: <http://sorger.med.harvard.edu/indra/relations/>
"""
def __init__(self, rdf_file, build_closure=True):
"""Initialize with the path to an RDF file"""
self.graph = rdflib.Graph()
self.graph.parse(rdf_file, format='nt')
self.isa_closure = {}
self.partof_closure = {}
self.components = {}
if build_closure:
self.build_transitive_closures()
# Build reverse lookup dict from the entity hierarchy
self._children = {}
logger.info('Generating reverse lookup table for families')
all_children = set(self.isa_closure.keys()).union(
self.partof_closure.keys())
for child in all_children:
parents = self.get_parents(child)
for parent in parents:
children_list = self._children.get(parent, [])
children_list.append(child)
self._children[parent] = children_list
def build_transitive_closures(self):
"""Build the transitive closures of the hierarchy.
This method constructs dictionaries which contain terms in the
hierarchy as keys and either all the "isa+" or "partof+" related terms
as values.
"""
component_counter = 0
for rel, tc_dict in (('isa', self.isa_closure),
('partof', self.partof_closure)):
qstr = self.prefixes + """
SELECT ?x ?y WHERE {{
{{?x rn:{0}+ ?y .}}
}}
""".format(rel)
res = self.graph.query(qstr)
for x, y in res:
xs = x.toPython()
ys = y.toPython()
try:
tc_dict[xs].append(ys)
except KeyError:
tc_dict[xs] = [ys]
xcomp = self.components.get(xs)
ycomp = self.components.get(ys)
if xcomp is None:
if ycomp is None:
# Neither x nor y are in a component so we start a
# new component and assign x and y to the same
# component
self.components[xs] = component_counter
self.components[ys] = component_counter
component_counter += 1
else:
# Because y is already part of an existing component
# we assign its component to x
self.components[xs] = ycomp
else:
if ycomp is None:
# Because x is already part of an existing component
# we assign its component to y
self.components[ys] = xcomp
else:
# This is a special case in which both x and y are
# parts of components
# If they are in the same component then there's
# nothing further to do
if xcomp == ycomp:
continue
else:
remove_component = max(xcomp, ycomp)
joint_component = min(xcomp, ycomp)
for k, v in self.components.items():
if v == remove_component:
self.components[k] = joint_component
@lru_cache(maxsize=100000)
def find_entity(self, x):
"""
Get the entity that has the specified name (or synonym).
Parameters
----------
x : string
Name or synonym for the target entity.
"""
qstr = self.prefixes + """
SELECT ?x WHERE {{
?x rn:hasName "{0}" .
}}
""".format(x)
res = self.graph.query(qstr)
if list(res):
en = list(res)[0][0].toPython()
return en
else:
return None
def isa(self, ns1, id1, ns2, id2):
"""Indicate whether one entity has an "isa" relationship to another.
Parameters
----------
ns1 : string
Namespace code for an entity.
id1 : string
URI for an entity.
ns2 : string
Namespace code for an entity.
id2 : string
URI for an entity.
Returns
-------
bool
True if t1 has an "isa" relationship with t2, either directly or
through a series of intermediates; False otherwise.
"""
# if id2 is None, or both are None, then it's by definition isa:
if id2 is None or (id2 is None and id1 is None):
return True
# If only id1 is None, then it cannot be isa
elif id1 is None:
return False
if self.isa_closure:
term1 = self.get_uri(ns1, id1)
term2 = self.get_uri(ns2, id2)
ec = self.isa_closure.get(term1)
if ec is not None and term2 in ec:
return True
else:
return False
else:
return self.query_rdf(id1, 'rn:isa+', id2)
def partof(self, ns1, id1, ns2, id2):
"""Indicate whether one entity is physically part of another.
Parameters
----------
ns1 : string
Namespace code for an entity.
id1 : string
URI for an entity.
ns2 : string
Namespace code for an entity.
id2 : string
URI for an entity.
Returns
-------
bool
True if t1 has a "partof" relationship with t2, either directly or
through a series of intermediates; False otherwise.
"""
        # if id2 is None, or both are None, then it's by definition partof:
if id2 is None or (id2 is None and id1 is None):
return True
        # If only id1 is None, then it cannot be partof
elif id1 is None:
return False
if self.partof_closure:
term1 = self.get_uri(ns1, id1)
term2 = self.get_uri(ns2, id2)
ec = self.partof_closure.get(term1)
if ec is not None and term2 in ec:
return True
else:
return False
else:
return self.query_rdf(id1, 'rn:partof+', id2)
def get_parents(self, uri, type='all'):
"""Return parents of a given entry.
Parameters
----------
uri : str
The URI of the entry whose parents are to be returned. See the
get_uri method to construct this URI from a name space and id.
type : str
'all': return all parents irrespective of level;
'immediate': return only the immediate parents;
'top': return only the highest level parents
"""
immediate_parents = set(self.isa_closure.get(uri, [])).union(
set(self.partof_closure.get(uri, [])))
if type == 'immediate':
return immediate_parents
all_parents = set()
for parent in immediate_parents:
grandparents = self.get_parents(parent, type='all')
all_parents = all_parents.union(grandparents)
all_parents = all_parents.union(immediate_parents)
if type == 'all':
return all_parents
else:
top_parents = set()
for parent in all_parents:
if not self.get_parents(parent, type='immediate'):
top_parents.add(parent)
return top_parents
def get_children(self, uri):
"""Return all (not just immediate) children of a given entry.
Parameters
----------
uri : str
The URI of the entry whose children are to be returned. See the
get_uri method to construct this URI from a name space and id.
"""
children = self._children.get(uri, [])
return children
@lru_cache(maxsize=100000)
def query_rdf(self, id1, rel, id2):
term1 = self.find_entity(id1)
term2 = self.find_entity(id2)
qstr = self.prefixes + """
SELECT (COUNT(*) as ?s) WHERE {{
<{}> {} <{}> .
}}
""".format(term1, rel, term2)
res = self.graph.query(qstr)
count = [r[0] for r in res][0]
if count.toPython() == 1:
return True
else:
return False
@staticmethod
def get_uri(ns, id):
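        # Map a namespace code and identifier to its canonical URI.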
if ns == 'HGNC':
return 'http://identifiers.org/hgnc.symbol/' + id
elif ns == 'UP':
return 'http://identifiers.org/uniprot/' + id
elif ns == 'BE' or ns == 'INDRA':
return 'http://sorger.med.harvard.edu/indra/entities/' + id
else:
raise ValueError('Unknown namespace %s' % ns)
# Load the default entity and modification hierarchies
entity_file_path = os.path.join(os.path.dirname(__file__),
'../resources/entity_hierarchy.rdf')
mod_file_path = os.path.join(os.path.dirname(__file__),
'../resources/modification_hierarchy.rdf')
act_file_path = os.path.join(os.path.dirname(__file__),
'../resources/activity_hierarchy.rdf')
ccomp_file_path = os.path.join(os.path.dirname(__file__),
'../resources/cellular_component_hierarchy.rdf')
"""Default entity hierarchy loaded from the RDF file at
`resources/entity_hierarchy.rdf`."""
entity_hierarchy = HierarchyManager(entity_file_path, build_closure=True)
"""Default modification hierarchy loaded from the RDF file at
`resources/modification_hierarchy.rdf`."""
modification_hierarchy = HierarchyManager(mod_file_path, build_closure=True)
"""Default activity hierarchy loaded from the RDF file at
`resources/activity_hierarchy.rdf`."""
activity_hierarchy = HierarchyManager(act_file_path, build_closure=True)
"""Default cellular_component hierarchy loaded from the RDF file at
`resources/cellular_component_hierarchy.rdf`."""
ccomp_hierarchy = HierarchyManager(ccomp_file_path, build_closure=False)
hierarchies = {'entity': entity_hierarchy,
'modification': modification_hierarchy,
'activity': activity_hierarchy,
'cellular_component': ccomp_hierarchy}
| bsd-2-clause | 83,850,203,100,852,430 | 35.756494 | 78 | 0.536083 | false | 4.403345 | false | false | false |
wisdomchuck/TestBot | utils/fun/lists.py | 1 | 2804 | # Image urls for the psat command
psat_memes = [
"http://i.imgur.com/5eJ5DbU.jpg",
"http://i.imgur.com/HBDnWVc.jpg",
"http://i.imgur.com/RzZlq2j.jpg",
"http://i.imgur.com/mVRNUIG.jpg",
"http://i.imgur.com/OvOmC6g.jpg",
"http://i.imgur.com/QqlSxaZ.png",
"http://i.imgur.com/finNuzx.jpg",
"http://i.imgur.com/XB2nBmz.png",
"http://i.imgur.com/7sCwNXl.jpg",
"http://i.imgur.com/caw6Pao.png",
"http://i.imgur.com/GwV0JYL.png"
]
# Response for the 8ball command
magic_conch_shell = [
"It is certain",
"It is decidedly so",
"Without a doubt",
"Yes definitely",
"You may rely on it",
"As I see it yes",
"Most likely",
"Outlook good",
"Yes",
"Signs point to yes",
"Reply hazy try again",
"Ask again later",
"Better not tell you now",
"Cannot predict now",
"Concentrate and ask again",
"Don't count on it",
"My reply is no",
"My sources say no",
"Outlook not so good",
"Very doubtful"
]
# Insults for the insult command
insults = [
"is a fucking pedophile",
"is a nigger",
"is so insecure about his penis size because it is smaller than a babies",
"is just a fucking sterotypical 12 year old saying shit like \"I fucked your mom\" and other shit",
"is a fucking disguisting, disgraceful, ignorant, pathetic, and discriminative weeaboo!",
"is a child molester",
"has a kink with 80 year old men",
"is the type of person who loves to fap to little girls",
"has no other purpose in life other than to be retarded and waste people's time",
"needs to kill itself",
"is the definition of faggot",
"has a gamertag, and it is I_Like_To_Rape_Children",
"loves to fap to discord bots",
"wants the d",
"has no life",
"is a furry",
"is a furfag",
"is a worthless piece of shit",
"is an 80 year old man",
"lost his virginity to his grandpa",
"supports abortion",
"is a cuntrag",
"is on the sex offender list"
]
# Drunk lines for the actdrunk command
drunkaf = [
"UDNDUNDUNDUNDUDNUDNDUNDUNDUNDUNDUNDUDNUDNDUNDUNDUNDUNDUNDUNDUNDNUDNDUN",
"AMERICAN IDIOTS YAAAS",
"HEH HEH HEH HEH IM SO FUKED UP LOL",
"lol Ill fuk u up n@4f3 fucjing fite me4",
"fite me u lil fuck",
"i have somethin to tell you: fedc",
"weeeeeeew",
"\*falls*",
"lol wana fuk some suc tonight #5SdE2@"
]
# Image urls for the honk command
honkhonkfgt = [
"https://i.imgur.com/c53XQCI.gif",
"https://i.imgur.com/ObWBP14.png",
"https://i.imgur.com/RZP2tB4.jpg",
"https://i.imgur.com/oxQ083P.gif",
"https://i.imgur.com/byBB7ln.jpg",
"https://i.imgur.com/NvUiLGG.gif",
"https://i.imgur.com/QDyvO4x.jpg",
"https://i.imgur.com/HtrRYSS.png",
"https://i.imgur.com/bvrFQnX.jpg"
]
| gpl-3.0 | -5,187,740,992,660,968,000 | 29.813187 | 103 | 0.634807 | false | 2.593895 | false | false | false |
ProjectQ-Framework/FermiLib-Plugin-Psi4 | fermilibpluginpsi4/_run_psi4.py | 1 | 8415 | # FermiLib plugin to interface with Psi4
#
# Copyright (C) 2017 ProjectQ-Framework (www.projectq.ch)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Functions to prepare psi4 input and run calculations."""
from __future__ import absolute_import
import os
import re
import subprocess
def create_geometry_string(geometry):
"""This function converts MolecularData geometry to psi4 geometry.
Args:
        geometry: A list of tuples giving the coordinates of each atom,
            for example [('H', (0, 0, 0)), ('H', (0, 0, 0.7414))]. Distances are
            in angstroms. Use atomic symbols to specify atoms.
Returns:
geo_string: A string giving the geometry for each atom on a line, e.g.:
H 0. 0. 0.
H 0. 0. 0.7414
"""
geo_string = ''
for item in geometry:
atom = item[0]
coordinates = item[1]
line = '{} {} {} {}'.format(atom,
coordinates[0],
coordinates[1],
coordinates[2])
if len(geo_string) > 0:
geo_string += '\n'
geo_string += line
return geo_string
def generate_psi4_input(molecule,
run_scf,
run_mp2,
run_cisd,
run_ccsd,
run_fci,
verbose,
tolerate_error,
memory,
template_file):
"""This function creates and saves a psi4 input file.
Args:
molecule: An instance of the MolecularData class.
run_scf: Boolean to run SCF calculation.
run_mp2: Boolean to run MP2 calculation.
run_cisd: Boolean to run CISD calculation.
run_ccsd: Boolean to run CCSD calculation.
run_fci: Boolean to FCI calculation.
verbose: Boolean whether to print calculation results to screen.
tolerate_error: Whether to fail or merely warn when Psi4 fails.
memory: Int giving amount of memory to allocate in MB.
template_file(str): Specify the filename of a Psi4 template
Returns:
input_file: A string giving the name of the saved input file.
"""
# Create Psi4 geometry string.
geo_string = create_geometry_string(molecule.geometry)
# Find the psi4_directory.
psi4_directory = os.path.dirname(os.path.realpath(__file__))
# Parse input template.
if template_file is None:
template_file = psi4_directory + '/_psi4_template'
input_template = []
with open(template_file, 'r') as stream:
for line in stream:
input_template += [line]
# Populate contents of input file based on automatic parameters.
input_content = [re.sub('&THIS_DIRECTORY',
psi4_directory, line)
for line in input_template]
# Populate contents of input file based on MolecularData parameters.
input_content = [re.sub('&geometry', str(molecule.geometry), line)
for line in input_content]
input_content = [re.sub('&basis', molecule.basis, line)
for line in input_content]
input_content = [re.sub('&charge', str(molecule.charge), line)
for line in input_content]
input_content = [re.sub('&multiplicity', str(molecule.multiplicity), line)
for line in input_content]
input_content = [re.sub('&description', str(molecule.description), line)
for line in input_content]
input_content = [re.sub('&mol_filename', str(molecule.filename), line)
for line in input_content]
input_content = [re.sub('&geo_string', geo_string, line)
for line in input_content]
# Populate contents of input file based on provided calculation parameters.
input_content = [re.sub('&run_scf', str(run_scf), line)
for line in input_content]
input_content = [re.sub('&run_mp2', str(run_mp2), line)
for line in input_content]
input_content = [re.sub('&run_cisd', str(run_cisd), line)
for line in input_content]
input_content = [re.sub('&run_ccsd', str(run_ccsd), line)
for line in input_content]
input_content = [re.sub('&run_fci', str(run_fci), line)
for line in input_content]
input_content = [re.sub('&tolerate_error', str(tolerate_error), line)
for line in input_content]
input_content = [re.sub('&verbose', str(verbose), line)
for line in input_content]
input_content = [re.sub('&memory', str(memory), line)
for line in input_content]
# Write input file and return handle.
input_file = molecule.filename + '.inp'
with open(input_file, 'w') as stream:
stream.write(''.join(input_content))
return input_file
def clean_up(molecule, delete_input=True, delete_output=False):
input_file = molecule.filename + '.inp'
output_file = molecule.filename + '.out'
run_directory = os.getcwd()
for local_file in os.listdir(run_directory):
if local_file.endswith('.clean'):
os.remove(run_directory + '/' + local_file)
try:
os.remove('timer.dat')
    except OSError:
pass
if delete_input:
os.remove(input_file)
if delete_output:
os.remove(output_file)
def run_psi4(molecule,
run_scf=True,
run_mp2=False,
run_cisd=False,
run_ccsd=False,
run_fci=False,
verbose=False,
tolerate_error=False,
delete_input=True,
delete_output=False,
memory=8000,
template_file=None):
"""This function runs a Psi4 calculation.
Args:
molecule: An instance of the MolecularData class.
run_scf: Optional boolean to run SCF calculation.
run_mp2: Optional boolean to run MP2 calculation.
run_cisd: Optional boolean to run CISD calculation.
run_ccsd: Optional boolean to run CCSD calculation.
run_fci: Optional boolean to FCI calculation.
verbose: Boolean whether to print calculation results to screen.
tolerate_error: Optional boolean to warn or raise when Psi4 fails.
delete_input: Optional boolean to delete psi4 input file.
delete_output: Optional boolean to delete psi4 output file.
memory: Optional int giving amount of memory to allocate in MB.
template_file(str): Path to Psi4 template file
Returns:
molecule: The updated MolecularData object.
Raises:
psi4 errors: An error from psi4.
"""
# Prepare input.
input_file = generate_psi4_input(molecule,
run_scf,
run_mp2,
run_cisd,
run_ccsd,
run_fci,
verbose,
tolerate_error,
memory,
template_file)
# Run psi4.
output_file = molecule.filename + '.out'
try:
process = subprocess.Popen(['psi4', input_file, output_file])
process.wait()
except:
print('Psi4 calculation for {} has failed.'.format(molecule.name))
process.kill()
clean_up(molecule, delete_input, delete_output)
if not tolerate_error:
raise
else:
clean_up(molecule, delete_input, delete_output)
# Return updated molecule instance.
molecule.load()
return molecule
| lgpl-3.0 | 7,315,284,371,011,495,000 | 37.424658 | 79 | 0.579085 | false | 4.226519 | false | false | false |
kubevirt/vAdvisor | vadvisor/virt/parser.py | 1 | 1627 | from xml.etree.ElementTree import XMLParser
class GuestXmlParser:
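    # Streaming XMLParser target: converts domain XML into nested dicts, with
    # <devices> and <clock> children collected into lists.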
int_tags = ["currentMemory", "memory"]
int_attribs = ["index", "port", "startport", "vram"]
def __init__(self):
self.json = {}
self.stack = [self.json]
        self.category = None
def start(self, tag, attrib):
self.tag = tag
for attr in self.int_attribs:
if attrib.get(attr):
attrib[attr] = int(attrib[attr])
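        # <devices> and <clock> become list containers; other elements nest as attribute dicts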
if tag in ("devices", "clock"):
self.category = tag
self.stack[-1][tag] = []
self.stack.append(self.stack[-1][tag])
elif tag == "emulator":
self.stack[-2][tag] = attrib
self.stack.append(attrib)
elif isinstance(self.stack[-1], dict):
self.stack[-1][tag] = attrib
self.stack.append(attrib)
elif self.category == "devices":
device = {"family": tag}
device.update(attrib)
self.stack[-1].append(device)
self.stack.append(device)
elif self.category == "clock":
self.stack[-1].append(attrib)
self.stack.append(attrib)
def end(self, tag):
self.stack.pop()
def data(self, data):
if data and data.strip():
if self.tag in self.int_tags:
self.stack[-1]["value"] = int(data)
else:
self.stack[-1]["value"] = data
def close(self):
return self.json
def parse_domain_xml(xml):
target = GuestXmlParser()
parser = XMLParser(target=target)
parser.feed(xml)
return parser.close()
| gpl-3.0 | 795,762,384,408,859,900 | 27.54386 | 56 | 0.533497 | false | 3.828235 | false | false | false |
gri-is/lodjob | crom_scripts/deprecated/acquisition_manual.py | 1 | 1582 | import pprint
from cromulent.model import * # imports models
from cromulent.vocab import * # imports model subcomponents
from utils.aat_labels import aat_labels
from utils.aat_label_fetcher import get_or_fetch
from utils.data_parsing import find_values
from utils.crom_helpers import props, toJSON, toString, printString, printAttr,\
type_maker
make_type = type_maker(vocab='aat:', getter=get_or_fetch,
labels=aat_labels, Class=Type)
# start by looking at knoedler_purchase_info + knoedler tables
a = Acquisition(ident='k-purchase-1000')
place = Place()
place.label = 'Art Gallery'
a.took_place_at = place
# for each seller id use:
# purchase.knoedler_purchase_sellers_collection[0].purchase_seller_uid
# or
# purchase.knoedler_purchase_sellers_collection[0].gpi_people.person_ulan
seller = Actor(ident='ulan-person-23300')
seller.label = 'Seller'
a.transferred_title_from = seller
timespan = TimeSpan()
timespan.label = 'When'
timespan.end_of_the_end = '1890-10-16' # "1890-01-05T00:00:00Z"
timespan.begin_of_the_begin = '1890-10-16' # "1890-01-04T00:00:00Z"
a.timespan = timespan
obj_id = 'k-object-1000'  # purchase.knoedler.object_id
obj = ManMadeObject(ident=obj_id)
a.transferred_title_of = obj
# for each buyer id use:
# purchase.knoedler_purchase_buyers_collection[0].purchase_buyer_uid
# or
# purchases.knoedler_purchase_buyers_collection[0].gpi_people.person_ulan
buyer = Group(ident='500304270') # Knoedler's ULAN ID (consider UID instead)
buyer.label = 'Buyer'
a.transferred_title_to = buyer
printString(a)
| agpl-3.0 | -9,017,786,665,425,611,000 | 31.285714 | 80 | 0.733881 | false | 2.790123 | false | false | false |
moggers87/django-bitfield | bitfield/tests/tests.py | 1 | 17505 | from __future__ import absolute_import
import pickle
from django.db import connection, models
from django.db.models import F
from django.test import TestCase
from bitfield import BitHandler, Bit, BitField
from bitfield.tests import BitFieldTestModel, CompositeBitFieldTestModel, BitFieldTestModelForm
from bitfield.compat import bitand, bitor
try:
from django.db.models.base import simple_class_factory # noqa
except ImportError:
# Django 1.5 muffed up the base class which breaks the pickle tests
# Note, it's fixed again in 1.6.
from django.db.models import base
_model_unpickle = base.model_unpickle
def simple_class_factory(model, attrs):
return model
def model_unpickle(model, attrs, factory):
return _model_unpickle(model, attrs)
setattr(base, 'simple_class_factory', simple_class_factory)
setattr(base, 'model_unpickle', model_unpickle)
class BitHandlerTest(TestCase):
def test_comparison(self):
bithandler_1 = BitHandler(0, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
bithandler_2 = BitHandler(1, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
bithandler_3 = BitHandler(0, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
assert bithandler_1 == bithandler_1
assert bithandler_1 != bithandler_2
assert bithandler_1 == bithandler_3
def test_defaults(self):
bithandler = BitHandler(0, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
# Default value of 0.
self.assertEquals(int(bithandler), 0)
# Test bit numbers.
self.assertEquals(int(bithandler.FLAG_0.number), 0)
self.assertEquals(int(bithandler.FLAG_1.number), 1)
self.assertEquals(int(bithandler.FLAG_2.number), 2)
self.assertEquals(int(bithandler.FLAG_3.number), 3)
        # Negative test: non-existent key.
self.assertRaises(AttributeError, lambda: bithandler.FLAG_4)
# Test bool().
self.assertEquals(bool(bithandler.FLAG_0), False)
self.assertEquals(bool(bithandler.FLAG_1), False)
self.assertEquals(bool(bithandler.FLAG_2), False)
self.assertEquals(bool(bithandler.FLAG_3), False)
def test_nonzero_default(self):
bithandler = BitHandler(1, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
self.assertEquals(bool(bithandler.FLAG_0), True)
self.assertEquals(bool(bithandler.FLAG_1), False)
self.assertEquals(bool(bithandler.FLAG_2), False)
self.assertEquals(bool(bithandler.FLAG_3), False)
bithandler = BitHandler(2, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
self.assertEquals(bool(bithandler.FLAG_0), False)
self.assertEquals(bool(bithandler.FLAG_1), True)
self.assertEquals(bool(bithandler.FLAG_2), False)
self.assertEquals(bool(bithandler.FLAG_3), False)
bithandler = BitHandler(3, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
self.assertEquals(bool(bithandler.FLAG_0), True)
self.assertEquals(bool(bithandler.FLAG_1), True)
self.assertEquals(bool(bithandler.FLAG_2), False)
self.assertEquals(bool(bithandler.FLAG_3), False)
bithandler = BitHandler(4, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
self.assertEquals(bool(bithandler.FLAG_0), False)
self.assertEquals(bool(bithandler.FLAG_1), False)
self.assertEquals(bool(bithandler.FLAG_2), True)
self.assertEquals(bool(bithandler.FLAG_3), False)
def test_mutation(self):
bithandler = BitHandler(0, ('FLAG_0', 'FLAG_1', 'FLAG_2', 'FLAG_3'))
self.assertEquals(bool(bithandler.FLAG_0), False)
self.assertEquals(bool(bithandler.FLAG_1), False)
self.assertEquals(bool(bithandler.FLAG_2), False)
self.assertEquals(bool(bithandler.FLAG_3), False)
bithandler = BitHandler(bithandler | 1, bithandler._keys)
self.assertEquals(bool(bithandler.FLAG_0), True)
self.assertEquals(bool(bithandler.FLAG_1), False)
self.assertEquals(bool(bithandler.FLAG_2), False)
self.assertEquals(bool(bithandler.FLAG_3), False)
bithandler ^= 3
self.assertEquals(int(bithandler), 2)
self.assertEquals(bool(bithandler & 1), False)
bithandler.FLAG_0 = False
self.assertEquals(bithandler.FLAG_0, False)
bithandler.FLAG_1 = True
self.assertEquals(bithandler.FLAG_0, False)
self.assertEquals(bithandler.FLAG_1, True)
bithandler.FLAG_2 = False
self.assertEquals(bithandler.FLAG_0, False)
self.assertEquals(bithandler.FLAG_1, True)
self.assertEquals(bithandler.FLAG_2, False)
class BitTest(TestCase):
def test_int(self):
bit = Bit(0)
self.assertEquals(int(bit), 1)
self.assertEquals(bool(bit), True)
self.assertFalse(not bit)
def test_comparison(self):
self.assertEquals(Bit(0), Bit(0))
self.assertNotEquals(Bit(1), Bit(0))
self.assertNotEquals(Bit(0, 0), Bit(0, 1))
self.assertEquals(Bit(0, 1), Bit(0, 1))
self.assertEquals(Bit(0), 1)
def test_and(self):
self.assertEquals(1 & Bit(2), 0)
self.assertEquals(1 & Bit(0), 1)
self.assertEquals(1 & ~Bit(0), 0)
self.assertEquals(Bit(0) & Bit(2), 0)
self.assertEquals(Bit(0) & Bit(0), 1)
self.assertEquals(Bit(0) & ~Bit(0), 0)
def test_or(self):
self.assertEquals(1 | Bit(2), 5)
self.assertEquals(1 | Bit(5), 33)
self.assertEquals(1 | ~Bit(2), -5)
self.assertEquals(Bit(0) | Bit(2), 5)
self.assertEquals(Bit(0) | Bit(5), 33)
self.assertEquals(Bit(0) | ~Bit(2), -5)
def test_xor(self):
self.assertEquals(1 ^ Bit(2), 5)
self.assertEquals(1 ^ Bit(0), 0)
self.assertEquals(1 ^ Bit(1), 3)
self.assertEquals(1 ^ Bit(5), 33)
self.assertEquals(1 ^ ~Bit(2), -6)
self.assertEquals(Bit(0) ^ Bit(2), 5)
self.assertEquals(Bit(0) ^ Bit(0), 0)
self.assertEquals(Bit(0) ^ Bit(1), 3)
self.assertEquals(Bit(0) ^ Bit(5), 33)
self.assertEquals(Bit(0) ^ ~Bit(2), -6)
class BitFieldTest(TestCase):
def test_basic(self):
# Create instance and make sure flags are working properly.
instance = BitFieldTestModel.objects.create(flags=1)
self.assertTrue(instance.flags.FLAG_0)
self.assertFalse(instance.flags.FLAG_1)
self.assertFalse(instance.flags.FLAG_2)
self.assertFalse(instance.flags.FLAG_3)
def test_regression_1425(self):
# Creating new instances shouldn't allow negative values.
instance = BitFieldTestModel.objects.create(flags=-1)
self.assertEqual(instance.flags._value, 15)
self.assertTrue(instance.flags.FLAG_0)
self.assertTrue(instance.flags.FLAG_1)
self.assertTrue(instance.flags.FLAG_2)
self.assertTrue(instance.flags.FLAG_3)
cursor = connection.cursor()
flags_field = BitFieldTestModel._meta.get_field_by_name('flags')[0]
flags_db_column = flags_field.db_column or flags_field.name
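        # Insert a row through raw SQL, bypassing the ORM, so that a raw -1
        # value ends up stored in the database.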
cursor.execute("INSERT INTO %s (%s) VALUES (-1)" % (BitFieldTestModel._meta.db_table, flags_db_column))
# There should only be the one row we inserted through the cursor.
instance = BitFieldTestModel.objects.get(flags=-1)
self.assertTrue(instance.flags.FLAG_0)
self.assertTrue(instance.flags.FLAG_1)
self.assertTrue(instance.flags.FLAG_2)
self.assertTrue(instance.flags.FLAG_3)
instance.save()
self.assertEqual(BitFieldTestModel.objects.filter(flags=15).count(), 2)
self.assertEqual(BitFieldTestModel.objects.filter(flags__lt=0).count(), 0)
def test_select(self):
BitFieldTestModel.objects.create(flags=3)
self.assertTrue(BitFieldTestModel.objects.filter(flags=BitFieldTestModel.flags.FLAG_1).exists())
self.assertTrue(BitFieldTestModel.objects.filter(flags=BitFieldTestModel.flags.FLAG_0).exists())
self.assertFalse(BitFieldTestModel.objects.exclude(flags=BitFieldTestModel.flags.FLAG_0).exists())
self.assertFalse(BitFieldTestModel.objects.exclude(flags=BitFieldTestModel.flags.FLAG_1).exists())
def test_update(self):
instance = BitFieldTestModel.objects.create(flags=0)
self.assertFalse(instance.flags.FLAG_0)
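        # bitor/bitand combine F('flags') with a Bit, so the flag is flipped by
        # a single SQL UPDATE without reloading the instance first.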
BitFieldTestModel.objects.filter(pk=instance.pk).update(flags=bitor(F('flags'), BitFieldTestModel.flags.FLAG_1))
instance = BitFieldTestModel.objects.get(pk=instance.pk)
self.assertTrue(instance.flags.FLAG_1)
BitFieldTestModel.objects.filter(pk=instance.pk).update(flags=bitor(F('flags'), ((~BitFieldTestModel.flags.FLAG_0 | BitFieldTestModel.flags.FLAG_3))))
instance = BitFieldTestModel.objects.get(pk=instance.pk)
self.assertFalse(instance.flags.FLAG_0)
self.assertTrue(instance.flags.FLAG_1)
self.assertTrue(instance.flags.FLAG_3)
self.assertFalse(BitFieldTestModel.objects.filter(flags=BitFieldTestModel.flags.FLAG_0).exists())
BitFieldTestModel.objects.filter(pk=instance.pk).update(flags=bitand(F('flags'), ~BitFieldTestModel.flags.FLAG_3))
instance = BitFieldTestModel.objects.get(pk=instance.pk)
self.assertFalse(instance.flags.FLAG_0)
self.assertTrue(instance.flags.FLAG_1)
self.assertFalse(instance.flags.FLAG_3)
def test_update_with_handler(self):
instance = BitFieldTestModel.objects.create(flags=0)
self.assertFalse(instance.flags.FLAG_0)
instance.flags.FLAG_1 = True
BitFieldTestModel.objects.filter(pk=instance.pk).update(flags=bitor(F('flags'), instance.flags))
instance = BitFieldTestModel.objects.get(pk=instance.pk)
self.assertTrue(instance.flags.FLAG_1)
def test_negate(self):
BitFieldTestModel.objects.create(flags=BitFieldTestModel.flags.FLAG_0 | BitFieldTestModel.flags.FLAG_1)
BitFieldTestModel.objects.create(flags=BitFieldTestModel.flags.FLAG_1)
self.assertEqual(BitFieldTestModel.objects.filter(flags=~BitFieldTestModel.flags.FLAG_0).count(), 1)
self.assertEqual(BitFieldTestModel.objects.filter(flags=~BitFieldTestModel.flags.FLAG_1).count(), 0)
self.assertEqual(BitFieldTestModel.objects.filter(flags=~BitFieldTestModel.flags.FLAG_2).count(), 2)
def test_default_value(self):
instance = BitFieldTestModel.objects.create()
self.assertTrue(instance.flags.FLAG_0)
self.assertTrue(instance.flags.FLAG_1)
self.assertFalse(instance.flags.FLAG_2)
self.assertFalse(instance.flags.FLAG_3)
def test_binary_capacity(self):
import math
from django.db.models.fields import BigIntegerField
# Local maximum value, slow canonical algorithm
MAX_COUNT = int(math.floor(math.log(BigIntegerField.MAX_BIGINT, 2)))
# Big flags list
flags = ['f' + str(i) for i in range(100)]
try:
BitField(flags=flags[:MAX_COUNT])
except ValueError:
self.fail("It should work well with these flags")
self.assertRaises(ValueError, BitField, flags=flags[:(MAX_COUNT + 1)])
def test_dictionary_init(self):
flags = {
0: 'zero',
1: 'first',
10: 'tenth',
2: 'second',
'wrongkey': 'wrongkey',
100: 'bigkey',
-100: 'smallkey',
}
try:
bf = BitField(flags)
except ValueError:
self.fail("It should work well with these flags")
self.assertEquals(bf.flags, ['zero', 'first', 'second', '', '', '', '', '', '', '', 'tenth'])
self.assertRaises(ValueError, BitField, flags={})
self.assertRaises(ValueError, BitField, flags={'wrongkey': 'wrongkey'})
self.assertRaises(ValueError, BitField, flags={'1': 'non_int_key'})
def test_defaults_as_key_names(self):
class TestModel(models.Model):
flags = BitField(flags=(
'FLAG_0',
'FLAG_1',
'FLAG_2',
'FLAG_3',
), default=('FLAG_1', 'FLAG_2'))
field = TestModel._meta.get_field('flags')
self.assertEquals(field.default, TestModel.flags.FLAG_1 | TestModel.flags.FLAG_2)
class BitFieldSerializationTest(TestCase):
def test_can_unserialize_bithandler(self):
data = b"cdjango.db.models.base\nmodel_unpickle\np0\n(cbitfield.tests.models\nBitFieldTestModel\np1\n(lp2\ncdjango.db.models.base\nsimple_class_factory\np3\ntp4\nRp5\n(dp6\nS'flags'\np7\nccopy_reg\n_reconstructor\np8\n(cbitfield.types\nBitHandler\np9\nc__builtin__\nobject\np10\nNtp11\nRp12\n(dp13\nS'_value'\np14\nI1\nsS'_keys'\np15\n(S'FLAG_0'\np16\nS'FLAG_1'\np17\nS'FLAG_2'\np18\nS'FLAG_3'\np19\ntp20\nsbsS'_state'\np21\ng8\n(cdjango.db.models.base\nModelState\np22\ng10\nNtp23\nRp24\n(dp25\nS'adding'\np26\nI00\nsS'db'\np27\nS'default'\np28\nsbsS'id'\np29\nI1\nsb."
inst = pickle.loads(data)
self.assertTrue(inst.flags.FLAG_0)
self.assertFalse(inst.flags.FLAG_1)
def test_pickle_integration(self):
inst = BitFieldTestModel.objects.create(flags=1)
data = pickle.dumps(inst)
inst = pickle.loads(data)
self.assertEquals(type(inst.flags), BitHandler)
self.assertEquals(int(inst.flags), 1)
def test_added_field(self):
data = b"cdjango.db.models.base\nmodel_unpickle\np0\n(cbitfield.tests.models\nBitFieldTestModel\np1\n(lp2\ncdjango.db.models.base\nsimple_class_factory\np3\ntp4\nRp5\n(dp6\nS'flags'\np7\nccopy_reg\n_reconstructor\np8\n(cbitfield.types\nBitHandler\np9\nc__builtin__\nobject\np10\nNtp11\nRp12\n(dp13\nS'_value'\np14\nI1\nsS'_keys'\np15\n(S'FLAG_0'\np16\nS'FLAG_1'\np17\nS'FLAG_2'\np18\ntp19\nsbsS'_state'\np20\ng8\n(cdjango.db.models.base\nModelState\np21\ng10\nNtp22\nRp23\n(dp24\nS'adding'\np25\nI00\nsS'db'\np27\nS'default'\np27\nsbsS'id'\np28\nI1\nsb."
inst = pickle.loads(data)
self.assertTrue('FLAG_3' in inst.flags.keys())
class CompositeBitFieldTest(TestCase):
def test_get_flag(self):
inst = CompositeBitFieldTestModel()
self.assertEqual(inst.flags.FLAG_0, inst.flags_1.FLAG_0)
self.assertEqual(inst.flags.FLAG_4, inst.flags_2.FLAG_4)
self.assertRaises(AttributeError, lambda: inst.flags.flag_NA)
def test_set_flag(self):
inst = CompositeBitFieldTestModel()
flag_0_original = bool(inst.flags.FLAG_0)
self.assertEqual(bool(inst.flags_1.FLAG_0), flag_0_original)
flag_4_original = bool(inst.flags.FLAG_4)
self.assertEqual(bool(inst.flags_2.FLAG_4), flag_4_original)
# flip flags' bits
inst.flags.FLAG_0 = not flag_0_original
inst.flags.FLAG_4 = not flag_4_original
# check to make sure the bit flips took effect
self.assertNotEqual(bool(inst.flags.FLAG_0), flag_0_original)
self.assertNotEqual(bool(inst.flags_1.FLAG_0), flag_0_original)
self.assertNotEqual(bool(inst.flags.FLAG_4), flag_4_original)
self.assertNotEqual(bool(inst.flags_2.FLAG_4), flag_4_original)
def set_flag():
inst.flags.flag_NA = False
self.assertRaises(AttributeError, set_flag)
def test_hasattr(self):
inst = CompositeBitFieldTestModel()
self.assertEqual(hasattr(inst.flags, 'flag_0'),
hasattr(inst.flags_1, 'flag_0'))
self.assertEqual(hasattr(inst.flags, 'flag_4'),
hasattr(inst.flags_2, 'flag_4'))
class BitFormFieldTest(TestCase):
def test_form_new_invalid(self):
invalid_data_dicts = [
{'flags': ['FLAG_0', 'FLAG_FLAG']},
{'flags': ['FLAG_4']},
{'flags': [1, 2]}
]
for invalid_data in invalid_data_dicts:
form = BitFieldTestModelForm(data=invalid_data)
self.assertFalse(form.is_valid())
def test_form_new(self):
data_dicts = [
{'flags': ['FLAG_0', 'FLAG_1']},
{'flags': ['FLAG_3']},
{'flags': []},
{}
]
for data in data_dicts:
form = BitFieldTestModelForm(data=data)
self.failUnless(form.is_valid())
instance = form.save()
flags = data['flags'] if 'flags' in data else []
for k in BitFieldTestModel.flags:
self.assertEquals(bool(getattr(instance.flags, k)), k in flags)
def test_form_update(self):
instance = BitFieldTestModel.objects.create(flags=0)
for k in BitFieldTestModel.flags:
self.assertFalse(bool(getattr(instance.flags, k)))
data = {'flags': ['FLAG_0', 'FLAG_1']}
form = BitFieldTestModelForm(data=data, instance=instance)
self.failUnless(form.is_valid())
instance = form.save()
for k in BitFieldTestModel.flags:
self.assertEquals(bool(getattr(instance.flags, k)), k in data['flags'])
data = {'flags': ['FLAG_2', 'FLAG_3']}
form = BitFieldTestModelForm(data=data, instance=instance)
self.failUnless(form.is_valid())
instance = form.save()
for k in BitFieldTestModel.flags:
self.assertEquals(bool(getattr(instance.flags, k)), k in data['flags'])
data = {'flags': []}
form = BitFieldTestModelForm(data=data, instance=instance)
self.failUnless(form.is_valid())
instance = form.save()
for k in BitFieldTestModel.flags:
self.assertFalse(bool(getattr(instance.flags, k)))
| apache-2.0 | -1,557,695,423,243,655,400 | 42.87218 | 578 | 0.649357 | false | 3.406305 | true | false | false |
adewynter/Tools | MLandDS/SpeechRecognition/dataPreprocessor.py | 1 | 5683 | # Data preprocessor for speech recognition
# Arguably the most important part of our infrastructure
# WARNING -- Disgustingly long class
# (c) Adrian deWynter, 2017, where applicable
from __future__ import print_function
from six.moves import urllib,xrange
from random import shuffle
from enum import Enum
import os,re,gzip,wave
import skimage.io
import numpy
CHUNK = 4096
width=512
height=512
pcm_path = "data/spoken_numbers_pcm/" # 8 bit
wav_path = "data/spoken_numbers_wav/" # 16 bit s16le
PATH = pcm_path
############
# Misc utils
############
def speaker(wav):
return re.sub(r'_.*', '', wav[2:])
def get_speakers(local_path=PATH):
files = os.listdir(local_path)
return list(set(map(speaker,files)))
def load_wav_file(name):
file = wave.open(name, "rb")
chunk = []
data0 = file.readframes(CHUNK)
while data0 != '':
data = numpy.fromstring(data0, dtype='uint8') # Alter datatype for efficiency
data = (data + 128) / 255. # 0-1 for Better convergence
chunk.extend(data)
data0 = file.readframes(CHUNK)
chunk = chunk[0:CHUNK * 2]
chunk.extend(numpy.zeros(CHUNK * 2 - len(chunk))) # Pad
# I think the file should be closed, no?
return chunk
# Labels (orthogonal features)
class Target(Enum):
    digits=1
    speaker=2
    words_per_minute=3
    word_phonemes=4
    word=5
    sentence=6
    sentiment=7
##############
# Batch utils
##############
def spectro_batch(batch_size=10):
return spectro_batch_generator(batch_size)
def spectro_batch_generator(batch_size,width=64,local_path="data/spoken_numbers_64x64/"):
    batch,labels = [],[]
    files=os.listdir(local_path)
    while True:
        shuffle(files)
        for image_name in files:
            image = skimage.io.imread(local_path+image_name).astype(numpy.float32)
            data = image/255. # 0-1 for better convergence
            data = data.reshape([width*width]) # flatten the width x width image: tensorflow.matmul needs flattened matrices
            batch.append(list(data))
            labels.append(dense_to_one_hot(int(image_name[0])))
            if len(batch) >= batch_size:
                yield batch, labels
                batch = []
                labels = []
def word_batch_generator(batch_size=10,target=Target.word,local_path=PATH):
    batch_waves = []
    labels = []
    speakers=get_speakers()
    files = os.listdir(local_path)
    while True:
        shuffle(files)
        for wav in files:
            if not wav.endswith(".wav"):continue  # only process audio files
            if target==Target.digits: labels.append(dense_to_one_hot(int(wav[0])))
            if target==Target.speaker: labels.append(one_hot_from_item(speaker(wav), speakers))
            chunk = load_wav_file(local_path+wav)
            batch_waves.append(chunk)
            # batch_waves.append(chunks[input_width])
            if len(batch_waves) >= batch_size:
                yield batch_waves, labels
                batch_waves = []  # Reset for next batch
                labels = []
def wave_batch_generator(batch_size=10,target=Target.speaker,local_path=PATH):
batch_waves,labels = [],[]
speakers=get_speakers()
files = os.listdir(local_path)
while True:
shuffle(files)
for wav in files:
if not wav.endswith(".wav"):continue
if target==Target.digits: labels.append(dense_to_one_hot(int(wav[0])))
if target==Target.speaker: labels.append(one_hot_from_item(speaker(wav), speakers))
chunk = load_wav_file(local_path+wav)
batch_waves.append(chunk)
# batch_waves.append(chunks[input_width])
if len(batch_waves) >= batch_size:
yield batch_waves, labels
batch_waves = []
labels = []
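# Usage sketch (illustrative only, not part of the original module; assumes the
# 16-bit WAV files under data/spoken_numbers_wav/ are present locally):
#   batches = wave_batch_generator(batch_size=32, target=Target.speaker, local_path=wav_path)
#   X, Y = next(batches)   # X: list of padded sample chunks, Y: list of one-hot speaker labels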
##########
# Classes
##########
# Data set
class DataSet(object):
def __init__(self, images, labels, fake_data=False, one_hot=False, load=False):
if fake_data:
self._num_examples = 10000
self.one_hot = one_hot
else:
num = len(images)
assert num == len(labels), ('images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
print("len(images) %d" % num)
self._num_examples = num
self.cache={}
self._image_names = numpy.array(images)
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
self._images=[]
if load:
self._images=self.load(self._image_names)
@property
def images(self):
return self._images
@property
def image_names(self):
return self._image_names
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
# only apply to a subset of all images at one time
def load(self,image_names):
print("loading %d images"%len(image_names))
return list(map(self.load_image,image_names)) # python3 map object WTF
def load_image(self,image_name):
if image_name in self.cache:
return self.cache[image_name]
else:
image = skimage.io.imread(DATA_DIR+ image_name).astype(numpy.float32)
# images = numpy.multiply(images, 1.0 / 255.0)
self.cache[image_name]=image
return image
# Return the next batch_size examples
def next_batch(self, batch_size, fake_data=False):
if fake_data:
fake_image = [1] * width * height
if self.one_hot:
fake_label = [1] + [0] * 9
else:
fake_label = 0
return [fake_image for _ in xrange(batch_size)], [
fake_label for _ in xrange(batch_size)]
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
self._epochs_completed += 1
# Shuffle the data
perm = numpy.arange(self._num_examples)
numpy.random.shuffle(perm)
self._image_names = self._image_names[perm]
self._labels = self._labels[perm]
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self.load(self._image_names[start:end]), self._labels[start:end]
if __name__ == "__main__":
pass | mit | -1,158,133,404,492,838,400 | 22.105691 | 98 | 0.672356 | false | 2.923354 | false | false | false |
MerlijnWajer/lewd | src/net.py | 1 | 1721 | """
This file is part of the LEd Wall Daemon (lewd) project
Copyright (c) 2009-2012 by ``brainsmoke'' and Merlijn Wajer (``Wizzup'')
lewd is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
lewd is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with lewd. If not, see <http://www.gnu.org/licenses/>.
See the file COPYING, included in this distribution,
for details about the copyright.
"""
import asyncore, socket
import spiscreen
class LEDConnection(asyncore.dispatcher_with_send):
def __init__(self, conn, sock, addr):
asyncore.dispatcher_with_send.__init__(self, sock)
self.data = ''
def handle_read(self):
data = self.recv(12*10*3)
self.data += data
if len(self.data) < 12*10*3:
return
screen.push_data(self.data[:12*10*3])
self.data = self.data[12*10*3:]
class SocketServer(asyncore.dispatcher):
def __init__(self, port):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind(('', port))
self.listen(5)
def handle_accept(self):
conn, addr = self.accept()
LEDConnection(self, conn, addr)
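# Client-side sketch (illustrative, not part of lewd): the daemon above expects
# raw frames of 12*10*3 bytes (one RGB byte triple per LED) on TCP port 8000.
# import socket
# conn = socket.create_connection(('localhost', 8000))
# conn.sendall(b'\x00' * (12 * 10 * 3))   # push one all-black frame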
screen = spiscreen.SPIScreen()
s = SocketServer(8000)
asyncore.loop()
| gpl-3.0 | 3,525,649,452,878,843,400 | 29.732143 | 74 | 0.669959 | false | 3.790749 | false | false | false |
landscapeio/pylint-common | setup.py | 1 | 1633 | # -*- coding: UTF-8 -*-
import sys
from setuptools import find_packages, setup
_version = '0.2.5'
_packages = find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"])
_short_description = ("pylint-common is a Pylint plugin to improve Pylint "
"error analysis of the standard Python library")
_classifiers = (
'Development Status :: 6 - Mature',
'Environment :: Console',
'Intended Audience :: Developers',
'Operating System :: Unix',
'Topic :: Software Development :: Quality Assurance',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
)
if sys.version_info < (2, 7):
# pylint 1.4 dropped support for Python 2.6
_install_requires = [
'pylint>=1.0,<1.4',
'astroid>=1.0,<1.3.0',
'logilab-common>=0.60.0,<0.63',
'pylint-plugin-utils>=0.2.6',
]
else:
_install_requires = [
'pylint>=1.0',
'pylint-plugin-utils>=0.2.6',
]
setup(
name='pylint-common',
url='https://github.com/landscapeio/pylint-common',
author='landscape.io',
author_email='[email protected]',
description=_short_description,
version=_version,
packages=_packages,
install_requires=_install_requires,
license='GPLv2',
classifiers=_classifiers,
keywords='pylint stdlib plugin',
zip_safe=False # see https://github.com/landscapeio/prospector/issues/18#issuecomment-49857277
)
| gpl-2.0 | -4,498,307,940,988,987,400 | 28.690909 | 99 | 0.620943 | false | 3.55 | false | false | false |
barberscore/barberscore-api | project/apps/salesforce/models.py | 1 | 21997 | import json
# Third-Party
from model_utils import Choices
from distutils.util import strtobool
# Local
from apps.bhs.models import Convention, Award, Chart, Group, Person
from apps.registration.models import Contest, Session, Assignment, Entry
class SfConvention:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Name
if hasattr(n, 'sf_Name'):
d['name'] = str(n.sf_Name.cdata)
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Season
if hasattr(n, 'sf_BS_Season__c'):
season = int(float(n.sf_BS_Season__c.cdata))
d['season'] = season
# Panel
if hasattr(n, 'sf_BS_Panel__c'):
d['panel'] = int(float(n.sf_BS_Panel__c.cdata))
# Year
if hasattr(n, 'sf_Year__c'):
d['year'] = int(n.sf_Year__c.cdata)
# Open Date
if hasattr(n, 'sf_Open_Date__c'):
d['open_date'] = n.sf_Open_Date__c.cdata
# Close Date
if hasattr(n, 'sf_Close_Date__c'):
d['close_date'] = n.sf_Close_Date__c.cdata
# Start Date
if hasattr(n, 'sf_Start_Date__c'):
d['start_date'] = n.sf_Start_Date__c.cdata
# End Date
if hasattr(n, 'sf_End_Date__c'):
d['end_date'] = n.sf_End_Date__c.cdata
# Venue
if hasattr(n, 'sf_Venue__c'):
d['venue_name'] = n.sf_Venue__c.cdata
# Location
if hasattr(n, 'sf_Location__c'):
d['location'] = n.sf_Location__c.cdata
# Time Zone
if hasattr(n, 'sf_Time_Zone__c'):
d['timezone'] = n.sf_Time_Zone__c.cdata
# Description
d['description'] = n.sf_Description__c.cdata if hasattr(n, 'sf_Description__c') else ""
# Divisions
if hasattr(n, 'sf_BS_Division__c'):
d['divisions'] = n.sf_BS_Division__c.cdata
# Kinds
if hasattr(n, 'sf_BS_Kind__c'):
d['kinds'] = n.sf_BS_Kind__c.cdata
# Return parsed dict
return d
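    # Illustrative use (an assumption, not shown in this module): the notification
    # node `n` is expected to behave like an untangle/xmltodict-style XML element,
    # exposing children as attributes with a `.cdata` string value, e.g.
    #   values = SfConvention.parse_sf_notification(sobject_node)
    #   Convention.objects.update_or_create(id=values.pop('id'), defaults=values)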
class SfAward:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Name
if hasattr(n, 'sf_Name'):
d['name'] = n.sf_Name.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Kind
if hasattr(n, 'sf_BS_Kind__c'):
d['kind'] = int(float(n.sf_BS_Kind__c.cdata))
# Gender
d['gender'] = int(float(n.sf_BS_Classification__c.cdata)) if hasattr(n, 'sf_BS_Classification__c') else None
# Level
if hasattr(n, 'sf_BS_Level__c'):
d['level'] = int(float(n.sf_BS_Level__c.cdata))
# Season
if hasattr(n, 'sf_BS_Season__c'):
d['season'] = int(float(n.sf_BS_Season__c.cdata))
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Divisions
d['division'] = int(float(n.sf_BS_Division__c.cdata)) if hasattr(n, 'sf_BS_Division__c') else None
# Is Single
if hasattr(n, 'sf_is_single__c'):
d['is_single'] = bool(strtobool(n.sf_is_single__c.cdata))
# Threshold
d['threshold'] = float(n.sf_Threshold__c.cdata) if hasattr(n, 'sf_Threshold__c') else None
# Minimum
d['minimum'] = float(n.sf_Minimum__c.cdata) if hasattr(n, 'sf_Minimum__c') else None
# advance
d['advance'] = float(n.sf_Advance__c.cdata) if hasattr(n, 'sf_Advance__c') else None
# spots
d['spots'] = int(float(n.sf_Spots__c.cdata)) if hasattr(n, 'sf_Spots__c') else None
# Description
d['description'] = n.sf_Description__c.cdata if hasattr(n, 'sf_Description__c') else ""
# Notes
d['notes'] = n.sf_Notes__c.cdata if hasattr(n, 'sf_Notes__c') else ""
# Age
d['age'] = int(float(n.sf_BS_Age__c.cdata)) if hasattr(n, 'sf_BS_Age__c') else None
# Is Novice
if hasattr(n, 'sf_is_novice__c'):
d['is_novice'] = bool(strtobool(n.sf_is_novice__c.cdata))
# Size
d['size'] = int(float(n.sf_BS_Size__c.cdata)) if hasattr(n, 'sf_BS_Size__c') else None
# Size Range
d['size_range'] = n.sf_Size_Range__c.cdata if hasattr(n, 'sf_Size_Range__c') else None
# Scope
d['scope'] = int(float(n.sf_BS_Scope__c.cdata)) if hasattr(n, 'sf_BS_Scope__c') else None
# Scope Range
d['scope_range'] = n.sf_Scope_Range__c.cdata if hasattr(n, 'sf_Scope_Range__c') else None
# Tree Sort
d['tree_sort'] = int(float(n.sf_Tree_Sort__c.cdata)) if hasattr(n, 'sf_Tree_Sort__c') else None
# Return parsed dict
return d
class SfChart:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Name
if hasattr(n, 'sf_Name'):
d['title'] = n.sf_Name.cdata
# Arrangers
if hasattr(n, 'sf_Arrangers__c'):
d['arrangers'] = n.sf_Arrangers__c.cdata
# Composer
d['composers'] = n.sf_Composers__c.cdata if hasattr(n, 'sf_Composers__c') else ""
# Lyricist
d['lyricists'] = n.sf_Lyricists__c.cdata if hasattr(n, 'sf_Lyricists__c') else ""
# Holders
d['holders'] = n.sf_Holders__c.cdata if hasattr(n, 'sf_Holders__c') else ""
# Description
d['description'] = n.sf_Description__c.cdata if hasattr(n, 'sf_Description__c') else ""
# Notes
d['notes'] = n.sf_Notes__c.cdata if hasattr(n, 'sf_Notes__c') else ""
# Return parsed dict
return d
class SfGroup:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Name
if hasattr(n, 'sf_Name'):
d['name'] = n.sf_Name.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Kind
if hasattr(n, 'sf_BS_Kind__c'):
d['kind'] = int(float(n.sf_BS_Kind__c.cdata))
# Gender
if hasattr(n, 'sf_BS_Classification__c'):
d['gender'] = int(float(n.sf_BS_Classification__c.cdata))
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Divisions
d['division'] = int(float(n.sf_BS_Division__c.cdata)) if hasattr(n, 'sf_BS_Division__c') else None
# bhs_id
if hasattr(n, 'sf_cfg_Member_Id__c') and n.sf_cfg_Member_Id__c.cdata.isalnum():
# Is a Chorus
# code
d['code'] = n.sf_cfg_Member_Id__c.cdata if hasattr(n, 'sf_cfg_Member_Id__c') else ""
elif hasattr(n, 'sf_cfg_Member_Id__c'):
# Is a Quartet
d['bhs_id'] = int(n.sf_cfg_Member_Id__c.cdata) if hasattr(n, 'sf_cfg_Member_Id__c') else None
# Return parsed dict
return d
class SfPerson:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Name
if hasattr(n, 'sf_FirstName') and hasattr(n, 'sf_LastName'):
d['name'] = n.sf_FirstName.cdata + " " + n.sf_LastName.cdata
# First Name
d['first_name'] = n.sf_FirstName.cdata if hasattr(n, 'sf_FirstName') else ""
# Last Name
d['last_name'] = n.sf_LastName.cdata if hasattr(n, 'sf_LastName') else ""
# part
d['part'] = int(float(n.sf_BS_VoicePart__c.cdata)) if hasattr(n, 'sf_BS_VoicePart__c') else None
# Gender
d['gender'] = int(float(n.sf_BS_Gender__c.cdata)) if hasattr(n, 'sf_BS_Gender__c') else None
# Email
d['email'] = n.sf_npe01__HomeEmail__c.cdata if hasattr(n, 'sf_npe01__HomeEmail__c') else ""
# Home Phone
d['home_phone'] = n.sf_HomePhone.cdata if hasattr(n, 'sf_HomePhone') else ""
# Cell Phone
d['cell_phone'] = n.sf_MobilePhone.cdata if hasattr(n, 'sf_MobilePhone') else ""
# BHS ID
d['bhs_id'] = int(n.sf_cfg_Member_Number__c.cdata) if hasattr(n, 'sf_cfg_Member_Number__c') else None
# Return parsed dict
return d
class SfSession:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Kind
if hasattr(n, 'sf_BS_Kind__c'):
d['kind'] = int(float(n.sf_BS_Kind__c.cdata))
# Num Rounds
if hasattr(n, 'sf_Num_rounds__c'):
d['num_rounds'] = int(float(n.sf_Num_rounds__c.cdata))
# Is Invitational
if hasattr(n, 'sf_is_invitational__c'):
d['is_invitational'] = bool(strtobool(n.sf_is_invitational__c.cdata))
# Description
d['description'] = n.sf_Description__c.cdata if hasattr(n, 'sf_Description__c') else ""
# Notes
d['notes'] = n.sf_Notes__c.cdata if hasattr(n, 'sf_Notes__c') else ""
# Footnotes
d['footnotes'] = n.sf_Footnotes__c.cdata if hasattr(n, 'sf_Footnotes__c') else ""
if hasattr(n, 'sf_BS_Convention_UUID__c'):
d['convention_id'] = n.sf_BS_Convention_UUID__c.cdata
# Name
if hasattr(n, 'sf_Name'):
d['name'] = n.sf_Name.cdata
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Season
if hasattr(n, 'sf_BS_Season__c'):
d['season'] = int(float(n.sf_BS_Season__c.cdata))
# Panel
if hasattr(n, 'sf_BS_Panel__c'):
d['panel'] = int(float(n.sf_BS_Panel__c.cdata))
# Year
if hasattr(n, 'sf_Year__c'):
d['year'] = int(n.sf_Year__c.cdata)
# Open Date
if hasattr(n, 'sf_Open_Date__c'):
d['open_date'] = n.sf_Open_Date__c.cdata
# Close Date
if hasattr(n, 'sf_Close_Date__c'):
d['close_date'] = n.sf_Close_Date__c.cdata
# Start Date
if hasattr(n, 'sf_Start_Date__c'):
d['start_date'] = n.sf_Start_Date__c.cdata
# End Date
if hasattr(n, 'sf_End_Date__c'):
d['end_date'] = n.sf_End_Date__c.cdata
# Venue
if hasattr(n, 'sf_Venue__c'):
d['venue_name'] = n.sf_Venue__c.cdata
# Location
if hasattr(n, 'sf_Location__c'):
d['location'] = n.sf_Location__c.cdata
# Time Zone
if hasattr(n, 'sf_Time_Zone__c'):
d['timezone'] = n.sf_Time_Zone__c.cdata
# Divisions
if hasattr(n, 'sf_BS_Division__c'):
d['divisions'] = n.sf_BS_Division__c.cdata
# Return parsed dict
return d
class SfContest:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Award ID
if hasattr(n, 'sf_BS_Award_UUID__c'):
d['award_id'] = n.sf_BS_Award_UUID__c.cdata
# Name
if hasattr(n, 'sf_Name'):
d['name'] = n.sf_Name.cdata
# Kind
if hasattr(n, 'sf_BS_Kind__c'):
d['kind'] = int(float(n.sf_BS_Kind__c.cdata))
# Gender
d['gender'] = int(float(n.sf_BS_Classification__c.cdata)) if hasattr(n, 'sf_BS_Classification__c') else None
# Level
if hasattr(n, 'sf_BS_Level__c'):
d['level'] = int(float(n.sf_BS_Level__c.cdata))
# Season
if hasattr(n, 'sf_BS_Season__c'):
d['season'] = int(float(n.sf_BS_Season__c.cdata))
# Description
d['description'] = n.sf_Description__c.cdata if hasattr(n, 'sf_Description__c') else ""
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Divisions
d['division'] = int(float(n.sf_BS_Division__c.cdata)) if hasattr(n, 'sf_BS_Division__c') else None
# Age
d['age'] = int(float(n.sf_BS_Age__c.cdata)) if hasattr(n, 'sf_BS_Age__c') else None
# Is Novice
if hasattr(n, 'sf_is_novice__c'):
d['is_novice'] = bool(strtobool(n.sf_is_novice__c.cdata))
# Is Single
if hasattr(n, 'sf_is_single__c'):
d['is_single'] = bool(strtobool(n.sf_is_single__c.cdata))
# Size
d['size'] = int(float(n.sf_BS_Size__c.cdata)) if hasattr(n, 'sf_BS_Size__c') else None
# Size Range
d['size_range'] = n.sf_Size_Range__c.cdata if hasattr(n, 'sf_Size_Range__c') else None
# Scope
d['scope'] = int(float(n.sf_BS_Scope__c.cdata)) if hasattr(n, 'sf_BS_Scope__c') else None
# Scope Range
d['scope_range'] = n.sf_Scope_Range__c.cdata if hasattr(n, 'sf_Scope_Range__c') else None
# Tree Sort
d['tree_sort'] = int(float(n.sf_Tree_Sort__c.cdata)) if hasattr(n, 'sf_Tree_Sort__c') else None
# Session ID
if hasattr(n, 'sf_BS_Session_UUID__c'):
d['session_id'] = n.sf_BS_Session_UUID__c.cdata
# Return parsed dict
return d
class SfAssignment:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Kind
if hasattr(n, 'sf_BS_Type__c'):
d['kind'] = int(float(n.sf_BS_Type__c.cdata))
# Category
if hasattr(n, 'sf_BS_Category__c'):
d['category'] = int(float(n.sf_BS_Category__c.cdata))
# Person ID
if hasattr(n, 'sf_BS_Contact_UUID__c'):
d['person_id'] = n.sf_BS_Contact_UUID__c.cdata
# Name
d['name'] = n.sf_Name__c.cdata if hasattr(n, 'sf_Name__c') else None
# First Name
d['first_name'] = n.sf_FirstName__c.cdata if hasattr(n, 'sf_FirstName__c') else None
# Last Name
d['last_name'] = n.sf_LastName__c.cdata if hasattr(n, 'sf_LastName__c') else None
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Area
if hasattr(n, 'sf_Area__c'):
d['area'] = n.sf_Area__c.cdata
# Email
d['email'] = n.sf_HomeEmail__c.cdata if hasattr(n, 'sf_HomeEmail__c') else None
# Cell Phone
d['cell_phone'] = n.sf_MobilePhone__c.cdata if hasattr(n, 'sf_MobilePhone__c') else None
# Airports
d['airports'] = n.sf_Airports__c.cdata if hasattr(n, 'sf_Airports__c') else None
# BHS ID
d['bhs_id'] = int(n.sf_cfg_Member_Number__c.cdata) if hasattr(n, 'sf_cfg_Member_Number__c') else None
# Session ID
if hasattr(n, 'sf_BS_Session_UUID__c'):
d['session_id'] = n.sf_BS_Session_UUID__c.cdata
# Return parsed dict
return d
class SfEntry:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Is Evaluation
if hasattr(n, 'sf_is_evaluation__c'):
d['is_evaluation'] = bool(strtobool(n.sf_is_evaluation__c.cdata))
# Is Private
if hasattr(n, 'sf_is_private__c'):
d['is_private'] = bool(strtobool(n.sf_is_private__c.cdata))
# Is MT
if hasattr(n, 'sf_is_mt__c'):
d['is_mt'] = bool(strtobool(n.sf_is_mt__c.cdata))
# Is Senior
if hasattr(n, 'sf_is_senior__c'):
d['is_senior'] = bool(strtobool(n.sf_is_senior__c.cdata))
# Is Youth
if hasattr(n, 'sf_is_youth__c'):
d['is_youth'] = bool(strtobool(n.sf_is_youth__c.cdata))
# Draw
d['draw'] = int(float(n.sf_Draw_Order__c.cdata)) if hasattr(n, 'sf_Draw_Order__c') else None
# Prelim
d['prelim'] = float(n.sf_Prelim__c.cdata) if hasattr(n, 'sf_Prelim__c') else None
# Base
d['base'] = float(n.sf_Base__c.cdata) if hasattr(n, 'sf_Base__c') else None
# Participants
d['participants'] = n.sf_Participants__c.cdata if hasattr(n, 'sf_Participants__c') else ""
# POS
d['pos'] = int(float(n.sf_Persons_On_Stage__c.cdata)) if hasattr(n, 'sf_Persons_On_Stage__c') else None
# Area
if hasattr(n, 'sf_Organization__c'):
d['area'] = n.sf_Organization__c.cdata
# Chapters
d['chapters'] = n.sf_Chapters__c.cdata if hasattr(n, 'sf_Chapters__c') else ""
# Description
d['description'] = n.sf_Description__c.cdata if hasattr(n, 'sf_Description__c') else ""
# Notes
d['notes'] = n.sf_Notes__c.cdata if hasattr(n, 'sf_Notes__c') else ""
# Group ID
if hasattr(n, 'sf_BS_Account_UUID__c'):
d['group_id'] = n.sf_BS_Account_UUID__c.cdata
# Name
if hasattr(n, 'sf_Name'):
d['name'] = n.sf_Name.cdata
# Kind
if hasattr(n, 'sf_BS_Kind__c'):
d['kind'] = int(float(n.sf_BS_Kind__c.cdata))
# Gender
if hasattr(n, 'sf_BS_Classification__c'):
d['gender'] = int(float(n.sf_BS_Classification__c.cdata))
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Divisions
d['division'] = int(float(n.sf_BS_Division__c.cdata)) if hasattr(n, 'sf_BS_Division__c') else None
if hasattr(n, 'sf_cfg_Member_Id__c'):
if (n.sf_cfg_Member_Id__c.cdata.isdigit()):
# BHS ID
d['bhs_id'] = int(n.sf_cfg_Member_Id__c.cdata)
else:
# code
d['code'] = n.sf_cfg_Member_Id__c.cdata
# Session ID
if hasattr(n, 'sf_BS_Session_UUID__c'):
d['session_id'] = n.sf_BS_Session_UUID__c.cdata
# Return parsed dict
return d
class SfEntryContest:
def parse_sf_notification(n):
d = {}
# Contest UUID
if hasattr(n, 'sf_BS_Contest_UUID__c'):
d['contest_id'] = n.sf_BS_Contest_UUID__c.cdata
# Entry UUID
if hasattr(n, 'sf_BS_Entry_UUID__c'):
d['entry_id'] = n.sf_BS_Entry_UUID__c.cdata
# Is Deleted
if hasattr(n, 'sf_IsDeleted'):
d['deleted'] = bool(strtobool(n.sf_IsDeleted.cdata))
# Return parsed dict
return d
class SfGroupChart:
def parse_sf_notification(n):
d = {}
# Group UUID
if hasattr(n, 'sf_BS_Account_UUID__c'):
d['group_id'] = n.sf_BS_Account_UUID__c.cdata
# Chart UUID
if hasattr(n, 'sf_BS_Chart_UUID__c'):
d['chart_id'] = n.sf_BS_Chart_UUID__c.cdata
# Is Deleted
if hasattr(n, 'sf_IsDeleted'):
d['deleted'] = bool(strtobool(n.sf_IsDeleted.cdata))
# Return parsed dict
return d
| bsd-2-clause | -6,375,111,616,845,028,000 | 28.927891 | 116 | 0.519525 | false | 3.108677 | false | false | false |
aaronn/django-rest-framework-passwordless | drfpasswordless/urls.py | 1 | 1129 | from drfpasswordless.settings import api_settings
from django.urls import path
from drfpasswordless.views import (
ObtainEmailCallbackToken,
ObtainMobileCallbackToken,
ObtainAuthTokenFromCallbackToken,
VerifyAliasFromCallbackToken,
ObtainEmailVerificationCallbackToken,
ObtainMobileVerificationCallbackToken,
)
app_name = 'drfpasswordless'
urlpatterns = [
path(api_settings.PASSWORDLESS_AUTH_PREFIX + 'email/', ObtainEmailCallbackToken.as_view(), name='auth_email'),
path(api_settings.PASSWORDLESS_AUTH_PREFIX + 'mobile/', ObtainMobileCallbackToken.as_view(), name='auth_mobile'),
path(api_settings.PASSWORDLESS_AUTH_PREFIX + 'token/', ObtainAuthTokenFromCallbackToken.as_view(), name='auth_token'),
path(api_settings.PASSWORDLESS_VERIFY_PREFIX + 'email/', ObtainEmailVerificationCallbackToken.as_view(), name='verify_email'),
path(api_settings.PASSWORDLESS_VERIFY_PREFIX + 'mobile/', ObtainMobileVerificationCallbackToken.as_view(), name='verify_mobile'),
path(api_settings.PASSWORDLESS_VERIFY_PREFIX, VerifyAliasFromCallbackToken.as_view(), name='verify_token'),
]
| mit | 6,030,230,326,098,474,000 | 52.761905 | 134 | 0.772365 | false | 3.827119 | false | true | false |
yuanchima/Activation-Visualization-Histogram | datasets/svhn.py | 1 | 2526 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import scipy.io
import scipy.ndimage as sn
import h5py
from util import log
# __PATH__ = os.path.abspath(os.path.dirname(__file__))
__PATH__ = './datasets/svhn'
rs = np.random.RandomState(123)
class Dataset(object):
def __init__(self, ids, name='default',
max_examples=None, is_train=True):
self._ids = list(ids)
self.name = name
self.is_train = is_train
if max_examples is not None:
self._ids = self._ids[:max_examples]
filename = 'data.hy'
file = os.path.join(__PATH__, filename)
log.info("Reading %s ...", file)
try:
self.data = h5py.File(file, 'r')
except:
raise IOError('Dataset not found. Please make sure the dataset was downloaded.')
log.info("Reading Done: %s", file)
def get_data(self, id):
# preprocessing and data augmentation
m = self.data[id]['image'].value/255.
l = self.data[id]['label'].value.astype(np.float32)
# Data augmentation: rotate 0, 90, 180, 270
"""
rot_num = np.floor(np.random.rand(1)*4)
for i in range(rot_num):
m = np.rot90(m, axes=(0, 1))
m = m + np.random.randn(*m.shape) * 1e-2
"""
return m, l
@property
def ids(self):
return self._ids
def __len__(self):
return len(self.ids)
def __repr__(self):
return 'Dataset (%s, %d examples)' % (
self.name,
len(self)
)
def get_data_info():
return np.array([32, 32, 10, 3])
def get_conv_info():
return np.array([64, 128, 256])
def get_vis_info():
return np.array([[128, 128], [64, 128], [64, 64], [10, 16], [5, 8], [2, 5]])
def create_default_splits(is_train=True):
ids = all_ids()
n = len(ids)
num_trains = 73257
dataset_train = Dataset(ids[:num_trains], name='train', is_train=False)
dataset_test = Dataset(ids[num_trains:], name='test', is_train=False)
return dataset_train, dataset_test
def all_ids():
id_filename = 'id.txt'
id_txt = os.path.join(__PATH__, id_filename)
try:
with open(id_txt, 'r') as fp:
_ids = [s.strip() for s in fp.readlines() if s]
except:
raise IOError('Dataset not found. Please make sure the dataset was downloaded.')
rs.shuffle(_ids)
return _ids
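# Usage sketch (illustrative only; assumes ./datasets/svhn/data.hy and id.txt exist):
#   trainset, testset = create_default_splits()
#   image, label = trainset.get_data(trainset.ids[0])   # one 32x32x3 image scaled to [0, 1]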
| mit | 8,197,584,550,295,132,000 | 25.041237 | 96 | 0.563737 | false | 3.350133 | false | false | false |
simplelist/python_test01 | mechine/KNY.py | 1 | 3559 | # -*- coding: utf-8 -*-
from numpy import *
import operator
# Read the data file into a matrix
def file2matrix(filename):
    # Open the data file and read every line
    fr = open(filename)
    arrayOLines = fr.readlines()
    # Initialise the output matrix
    numberOfLines = len(arrayOLines)
    returnMat = zeros((numberOfLines,3))
    # Initialise the class-label vector
    classLabelVector = []
    # Loop over every line of data
    index = 0
    for line in arrayOLines:
        # Strip the trailing newline
        line = line.strip()
        # Split out the 4 data fields
        listFromLine = line.split('\t')
        # Store the first three fields in the matrix
        returnMat[index,:] = listFromLine[0:3]
        # Store the fourth field in the label vector
        classLabelVector.append(int(listFromLine[-1]))
        index += 1
    return returnMat,classLabelVector
# Normalise the data to the [0, 1] range
def autoNorm(dataSet):
    # Read the minimum and maximum of each column
    minVals = dataSet.min(0)
    maxVals = dataSet.max(0)
    # Range between maximum and minimum
    ranges = maxVals - minVals
    # Initialise the output
    normDataSet = zeros(shape(dataSet))
    # Number of rows in the matrix
    m = dataSet.shape[0]
    # Matrix arithmetic: the (oldValue - min) step of the normalisation formula
    normDataSet = dataSet - tile(minVals, (m,1))
    # Matrix division: the division step of the normalisation formula
    normDataSet = normDataSet/tile(ranges, (m,1))
    # Return the normalised data, the column ranges and the column minima
    return normDataSet, ranges, minVals
# kNN algorithm implementation
def classify0(inX, dataSet, labels, k):
    # Number of training samples
    dataSetSize = dataSet.shape[0]
    # Matrix arithmetic: difference between the test sample and every training sample
    diffMat = tile(inX, (dataSetSize,1)) - dataSet
    # sqDistances: sum of the squared differences from the previous step
    sqDiffMat = diffMat**2
    sqDistances = sqDiffMat.sum(axis=1)
    # Take the square root to obtain the distance vector
    distances = sqDistances**0.5
    # Sort by distance, ascending
    sortedDistIndicies = distances.argsort()
    classCount={}
    # Visit the k nearest samples in turn
    for i in range(k):
        # Record the class that sample belongs to
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1
    # Sort the classes by frequency, descending
    sortedClassCount = sorted(classCount.iteritems(), key=operator.itemgetter(1), reverse=True)
    # Return the most frequent class
    return sortedClassCount[0][0]
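# Illustrative call (not in the original file): classify one normalised sample
# against the remaining data using the 3 nearest neighbours.
#   predicted = classify0(normMat[0, :], normMat[100:, :], datingLabels[100:], 3)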
# Test the algorithm
def datingClassTest():
    # Fraction of the data held out for testing
    hoRatio = 0.10
    # Read the data
    datingDataMat,datingLabels = file2matrix('datingTestSet2.txt')
    # Normalise the data
    normMat, ranges, minVals = autoNorm(datingDataMat)
    # Total number of rows
    m = normMat.shape[0]
    # Number of test rows
    numTestVecs = int(m*hoRatio)
    # Initialise the error counter
    errorCount = 0.0
    # Loop over every test row
    for i in range(numTestVecs):
        # Classify this test sample
        classifierResult = classify0(normMat[i,:],normMat[numTestVecs:m,:],datingLabels[numTestVecs:m],3)
        # Print the kNN prediction and the true class
        print "the classifier came back with: %d, the real answer is: %d" % (classifierResult, datingLabels[i])
        # Check whether the kNN prediction was correct
        if (classifierResult != datingLabels[i]): errorCount += 1.0
    # Print the error rate
    print "the total error rate is: %f" % (errorCount/float(numTestVecs))
# Run the test
datingClassTest() | lgpl-3.0 | 314,112,716,281,400,060 | 21 | 111 | 0.641604 | false | 2.084328 | true | false | false |
darrencheng0817/AlgorithmLearning | Python/leetcode/uglyNumber.py | 1 | 2890 | '''
Created on 2015-12-11
https://leetcode.com/problems/ugly-number/
https://leetcode.com/problems/ugly-number-ii/
https://leetcode.com/problems/super-ugly-number/
@author: Darren
'''
'''
Write a program to check whether a given number is an ugly number.
Ugly numbers are positive numbers whose prime factors only include 2, 3, 5. For example, 6, 8 are ugly while 14 is not ugly since it includes another prime factor 7.
Note that 1 is typically treated as an ugly number.
'''
def isUlgyNumber(num):
if not num:
return False
if num==1:
return True
if num%2==0:
return isUlgyNumber(num//2)
if num%3==0:
return isUlgyNumber(num//3)
if num%5==0:
return isUlgyNumber(num//5)
return False
print(isUlgyNumber(14))
'''
Write a program to find the n-th ugly number.
Ugly numbers are positive numbers whose prime factors only include 2, 3, 5. For example, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 is the sequence of the first 10 ugly numbers.
Note that 1 is typically treated as an ugly number.
'''
def ulgyNumber(N):
if N<1:
raise Exception("Invalid Input")
if N==1:
return 1
res=[1]*N
count=[0]*3
primes=[2,3,5]
for i in range(1,N):
nextNum=min([prime*res[count[j]] for j,prime in enumerate(primes)])
for j,prime in enumerate(primes):
if nextNum==prime*res[count[j]]:
count[j]+=1
res[i]=nextNum
return res
print(ulgyNumber(10))
'''
Write a program to find the nth super ugly number.
Super ugly numbers are positive numbers whose all prime factors are in the given prime list primes of size k. For example, [1, 2, 4, 7, 8, 13, 14, 16, 19, 26, 28, 32] is the sequence of the first 12 super ugly numbers given primes = [2, 7, 13, 19] of size 4.
Note:
(1) 1 is a super ugly number for any given primes.
(2) The given numbers in primes are in ascending order.
(3) 0 < k ≤ 100, 0 < n ≤ 106, 0 < primes[i] < 1000.
'''
def nthSuperUglyNumber(n, primes):
"""
:type n: int
:type primes: List[int]
:rtype: int
"""
if n==1:
return 1
res=[1]*n
count=[0]*len(primes)
for __index in range(1,n):
nextNum=min([prime*res[count[index]] for index,prime in enumerate(primes)])
for index,prime in enumerate(primes):
if nextNum==prime*res[count[index]]:
count[index]+=1
res[__index]=nextNum
return res[-1]
n=200000
primes=[2,3,5,13,19,29,31,41,43,53,59,73,83,89,97,103,107,109,127,137,139,149,163,173,179,193,197,199,211,223,227,229,239,241,251,257,263,269,271,281,317,331,337,347,353,359,367,373,379,389,397,409,419,421,433,449,457,461,463,479,487,509,521,523,541,547,563,569,577,593,599,601,613,619,631,641,659,673,683,701,709,719,733,739,743,757,761,769,773,809,811,829,857,859,881,919,947,953,967,971]
print(nthSuperUglyNumber(n, primes)) | mit | 3,427,185,567,020,352,500 | 32.5 | 390 | 0.651042 | false | 2.894472 | false | false | false |
proggy/fns | frogs.py | 1 | 1401 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright notice
# ----------------
#
# Copyright (C) 2014 Daniel Jung
# Contact: [email protected]
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
#
"""Frog definitions for this package. Requires the frog module."""
__created__ = '2014-09-27'
__modified__ = '2014-09-27'
from frog import Frog
import fns
# dashify
f = Frog(inmap=dict(files='$@'),
usage='dashify [options] FILES',
optdoc=dict(alldots='do not even preserve the last dot',
verbose='be verbose',
test='list changes without actually renaming any files',
nolower='do not switch to lowercase'))
#allow_interspersed_args=False
f(fns.dashify)
| gpl-2.0 | 796,572,170,233,028,400 | 34.025 | 77 | 0.683797 | false | 3.848901 | false | false | false |
kapadia/toplotly | toplotly/__init__.py | 1 | 2486 |
import os
import json
from dateutil.parser import parse
import plotly
from plotly.graph_objs import Histogram, Scatter, Scatter3d, Data, Layout, XAxis, YAxis, ZAxis, Figure
__version__ = '0.0.1'
def is_numeric(x):
try:
float(x)
return True
except ValueError:
return False
def is_date(d):
    try:
        parse(d)
        return True
    except (ValueError, AttributeError):
        return False
def is_string(x):
return is_numeric(x) + is_date(x) == 0
def format_data(data):
data = json.loads(''.join(data))
keys = data[0].keys()
# Check column type
sidx = [ idx for idx, key in enumerate(keys) if is_string(data[0][key]) ]
values = [ [ d.get(key) for key in keys ] for d in data ]
values = zip(*values)
if len(sidx) == 1:
text = values.pop(sidx[0])
keys.pop(sidx[0])
else:
text = None
return {
'layout': {
'axes': keys
},
'data': {
'values': values,
'text': text
}
}
def get_histogram(data):
values = data['values']
return Data([
Histogram(
x=values
)
])
def get_scatter2d(data):
values = data['values']
return Data([
Scatter(
x=values[0],
y=values[1],
mode='markers',
text=data['text']
)
])
def get_scatter3d(data):
values = data['values']
return Data([
Scatter3d(
x=values[0],
y=values[1],
z=values[2]
)
])
def post(filename, data, fileopt='new', title=None, world_readable=True):
# Get username and api key
username = os.environ.get('PLOTLY_USERNAME')
api_key = os.environ.get('PLOTLY_API_KEY')
plotly.tools.set_credentials_file(username=username, api_key=api_key)
axes = data['layout']['axes']
nAxes = len(axes)
get_data = {
1: get_histogram,
2: get_scatter2d,
3: get_scatter3d
}
axes_kwargs = ['xaxis', 'yaxis', 'zaxis']
axes_obj = [XAxis, YAxis, ZAxis]
layout_kwargs = { axes_kwargs[idx]: axes_obj[idx](title=axis) for idx, axis in enumerate(axes) }
dataobj = get_data[nAxes](data['data'])
layout = Layout(**layout_kwargs)
fig = Figure(data=dataobj, layout=layout)
r = plotly.plotly.plot(fig, filename=filename)
print r
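# Usage sketch (illustrative only; the file name and its columns are hypothetical):
#   with open('points.json') as fp:
#       payload = format_data(fp.readlines())
#   post('my-scatter', payload)   # needs PLOTLY_USERNAME / PLOTLY_API_KEY in the environment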
| mit | -6,038,778,973,686,361,000 | 18.429688 | 102 | 0.530571 | false | 3.501408 | false | false | false |
Liorst4/pysteamcli | pysteamcli/app_manifest.py | 1 | 1745 | #!/usr/bin/env python3
"""
Parse Steam's application manifest files.
"""
import itertools
def next_data(it):
"""
Advances an iterator until new data is found.
:param it: Character iterator.
:returns: Data found.
"""
quotation_mark = lambda c: c != '"'
data_begin = itertools.dropwhile(quotation_mark, it)
next(data_begin)
data = itertools.takewhile(quotation_mark, data_begin)
return ''.join(data)
def next_scope(it):
"""
Advances the iterator until a scope closing mark is found.
:param it: Character iterator.
:returns: The content of the scope.
"""
s_counter = 0
for i in it:
if i == '{':
s_counter += 1
elif i == '}':
if s_counter == 0:
break
else:
s_counter -= 1
yield i
def parse_acf_content(it):
"""
Parse the content of an acf file.
:param it: Character iterator.
:returns: The content of an acf file as a dictionary.
"""
result = list()
while True:
try:
key = next_data(it)
value_type = next(it)
next(it)
if value_type == '\t':
# Data
value = next_data(it)
elif value_type == '\n':
# Nested scope.
value = parse_acf_content(next_scope(it))
else:
raise Exception
except StopIteration:
break
result.append((key, value))
return dict(result)
def parse_acf_file(file_path):
"""
Parse an acf file.
"""
with open(file_path, 'r') as acf_file:
content = acf_file.read()
return parse_acf_content(iter(content))
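# Usage sketch (illustrative; the path and keys below depend on the actual manifest):
#   manifest = parse_acf_file('steamapps/appmanifest_440.acf')
#   print(manifest.get('AppState', {}).get('name'))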
| mit | 4,851,794,010,905,452,000 | 18.388889 | 62 | 0.525501 | false | 3.93018 | false | false | false |
xaime/sneaks | modules/report.py | 1 | 31061 | # -*- coding: utf-8 -*-
# SNEAKS - Snooping Early Alert Knowledge Service
from datetime import timedelta
from boomslang import *
import math
import pygeoip
import socket
from ConfigParser import RawConfigParser
import html2text
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import codecs
import logging
import re
import smtplib
from operator import itemgetter
def tagreplace(tag, html, replacehtml):
"""
    Replaces the text contained between <!--tag--> and <!--/tag--> with replacehtml
"""
t1 = html.index("<!--" + tag + "-->")
t2 = html.index("<!--/" + tag + "-->")
return html[:t1] + replacehtml + html[t2 + len(tag) + 8:]
def tagdelete(tag, html):
"""
    Removes the text contained between <!--tag--> and <!--/tag-->
"""
t1 = html.index("<!--" + tag + "-->")
t2 = html.index("<!--/" + tag + "-->")
return html[:t1] + html[t2 + len(tag) + 8:]
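# Example of the two helpers above (illustrative, not in the original source):
#   tagreplace("X", "A<!--X-->old<!--/X-->B", "new")  ->  "AnewB"
#   tagdelete("X", "A<!--X-->old<!--/X-->B")          ->  "AB"
# i.e. the markers themselves are removed along with the enclosed fragment.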
def org_report_chart(rtime_frame, rinterval, rtime, rips, people, org_alarm_threshold, plugin_dir):
"""
    Generates the footprinting charts for the organisation and for every person with detections
    and saves them in the temp folder
"""
detected_persons = 0
person_index = []
pinterval = rinterval + rtime_frame
rorg_eval_data = [0] * pinterval
for rperson in people:
rperson.eval_data(rtime_frame, pinterval, rtime, rips)
if rperson.detection:
person_index.append(people.index(rperson))
detected_persons += 1
for j in range(pinterval):
rorg_eval_data[j] += rperson.detection_data[j]
datemin = rtime - timedelta(minutes=(pinterval - 1))
datelist = []
for i in range(pinterval):
datelist.append(datemin + timedelta(minutes=i))
dateidx = range(len(rorg_eval_data))
orgplot = Plot()
orgplot.yLimits = (0, max(rorg_eval_data) + 10)
orgplot.xLimits = (0, max(dateidx))
orgplot.grid.visible = True
orgplot.title = u"Nivel de footprinting sobre la organización"
orgplot.yLabel = u"Valor acumulado en los últimos " + str(rtime_frame) + " minutos"
orgplot.yLabelProperties = {"color":"#808080", "fontsize": 10}
    # light grey fill over the full interval
orgline_fill = StackedLines()
orgline_fill1 = Line()
orgline_fill1.xValues = dateidx
orgline_fill1.yValues = rorg_eval_data
orgline_fill1.lineWidth = 0
points = [dateidx[datelist.index(c)] for c in datelist if c.minute in {0, 30}]
labels = [c.strftime("%H:%M") for c in datelist if c.minute in {0, 30}]
if len(points) > 24:
points = [dateidx[datelist.index(c)] for c in datelist if c.minute == 0]
labels = [c.strftime("%H:%M") for c in datelist if c.minute == 0]
orgline_fill.xTickLabelPoints = points
orgline_fill.xTickLabels = labels
orgline_fill.xTickLabelProperties = {"rotation": 45, "fontsize": 10}
orgline_fill.addLine(orgline_fill1, color="#E6E6E6")
orgplot.add(orgline_fill)
    # dashed line over the full interval
orgline_p = Line()
orgline_p.xValues = dateidx
orgline_p.yValues = rorg_eval_data
orgline_p.lineStyle = "--"
orgline_p.color = "#B2B2B2"
orgplot.add(orgline_p)
    # red fill over the analysed interval
orgline_fill_p = StackedLines()
orgline_fill_p1 = Line()
orgline_fill_p1.xValues = dateidx[rtime_frame:]
orgline_fill_p1.yValues = rorg_eval_data[rtime_frame:]
orgline_fill_p1.lineWidth = 0
orgline_fill_p.addLine(orgline_fill_p1, color="#FF0000")
orgplot.add(orgline_fill_p)
    # Add the solid line with the accumulated level for "rinterval"
orgline_s = Line()
orgline_s.xValues = dateidx[rtime_frame:]
orgline_s.yValues = rorg_eval_data[rtime_frame:]
orgline_s.lineWidth = 2
orgplot.add(orgline_s)
    # Add the threshold line and its label
torgline = Line()
torgline.xValues = dateidx
torgline.yValues = [org_alarm_threshold]*pinterval
torgline.lineStyle = "--"
torgline.color = 'r'
orgplot.add(torgline)
tlabel = Label(len(dateidx)/12, org_alarm_threshold + ((max(rorg_eval_data) + 10)/50),
"Umbral (" + str(org_alarm_threshold) + ")")
orgplot.add(tlabel)
    # blue fill over the analysed interval, below the threshold
orgline_fill_u = StackedLines()
orgline_fill_u1 = Line()
orgline_fill_u1.xValues = dateidx[rtime_frame:]
temp = rorg_eval_data[rtime_frame:]
for i in range(len(temp)):
if temp[i] > org_alarm_threshold:
temp[i] = org_alarm_threshold
orgline_fill_u1.yValues = temp
orgline_fill_u1.lineWidth = 0
orgline_fill_u.addLine(orgline_fill_u1, color="#3399FF")
orgplot.add(orgline_fill_u)
    # Add the vertical line that marks the analysed interval
vline1 = VLine()
vline1.xValues = \
[dateidx[datelist.index(c)] for c in datelist if
(c.minute == (rtime - timedelta(minutes=rinterval - 1)).minute
and c.hour == (rtime - timedelta(minutes=rinterval - 1)).hour)]
vline1.color = 'b'
vline1.lineStyle = ":"
orgplot.add(vline1)
rorg_eval_data_polar = [0]*len(plugin_dir)
for i in person_index:
for j in range(len(plugin_dir)):
rorg_eval_data_polar[j] += max(people[i].datasources[j].eval_data(rtime_frame, rinterval, rtime, rips))
    # Draw the radar-style projection
radarplot = Plot()
radarplot.projection = 'polar'
radarplot.title = u"Valor máximo por origen de detección"
radarplot.yLimits = (0, max(rorg_eval_data_polar) + 2)
radarplot.grid.color = "#A1A1A1"
radarplot.grid.visible = True
radarplot.grid.style = "--"
lineradar = Line()
t = len(plugin_dir)
lineradar.yValues = rorg_eval_data_polar + [rorg_eval_data_polar[0]]
lineradar.xValues = [(2*math.pi/t)*x for x in range(t)] + [2*math.pi]
lineradar.xTickLabelPoints = [(2*math.pi/t)*x for x in range(t)]
lineradar.xTickLabels = [p[8:] for p in plugin_dir]
lineradar.xTickLabelProperties = {"color": "#006600", "alpha": 0.8}
lineradar.lineWidth = 2
lineradar.color = "r"
radarscat = Scatter()
radarscat.xValues = lineradar.xValues
radarscat.yValues = lineradar.yValues
radarscat.markerSize = 25
radarscat.marker = "s"
radarplot.add(lineradar)
radarplot.add(radarscat)
orgplot.setDimensions(8, 5, dpi=75)
radarplot.setDimensions(5, 5, dpi=50)
orgplot.save("temp/imgchart_org.png")
radarplot.save("temp/imgradar_org.png")
    # Now draw the charts for every person with detections
personplot = []
personline_fill = []
personline_fill1 = []
personline_p = []
personline_fill_p = []
personline_fill_p1 = []
personline_s = []
tpersonline = []
tplabel = []
personline_fill_u = []
personline_fill_u1 = []
vline = []
pradarplot = []
plineradar = []
pradarscat = []
for idx in person_index:
people[idx].eval_data(rtime_frame, pinterval, rtime, rips)
p_eval_data = people[idx].detection_data
personplot.append(Plot())
personplot[-1].yLimits = orgplot.yLimits
personplot[-1].xLimits = orgplot.xLimits
personplot[-1].grid.visible = True
personplot[-1].title = "Nivel de footprinting sobre " + people[idx].name
personplot[-1].yLabel = orgplot.yLabel
personplot[-1].yLabelProperties = orgplot.yLabelProperties
        # light grey fill over the full interval
personline_fill.append(StackedLines())
personline_fill1.append(Line())
personline_fill1[-1].xValues = dateidx
personline_fill1[-1].yValues = p_eval_data
personline_fill1[-1].lineWidth = 0
personline_fill[-1].xTickLabelPoints = orgline_fill.xTickLabelPoints
personline_fill[-1].xTickLabels = orgline_fill.xTickLabels
personline_fill[-1].xTickLabelProperties = orgline_fill.xTickLabelProperties
personline_fill[-1].addLine(personline_fill1[-1], color="#E6E6E6")
personplot[-1].add(personline_fill[-1])
        # dashed line over the full interval
personline_p.append(Line())
personline_p[-1].xValues = dateidx
personline_p[-1].yValues = p_eval_data
personline_p[-1].lineStyle = "--"
personline_p[-1].color = "#B2B2B2"
personplot[-1].add(personline_p[-1])
        # red fill over the analysed interval
personline_fill_p.append(StackedLines())
personline_fill_p1.append(Line())
personline_fill_p1[-1].xValues = orgline_fill_p1.xValues
personline_fill_p1[-1].yValues = p_eval_data[rtime_frame:]
personline_fill_p1[-1].lineWidth = 0
personline_fill_p[-1].addLine(personline_fill_p1[-1], color="#FF8080")
personplot[-1].add(personline_fill_p[-1])
        # Add the solid line with the accumulated level for "rinterval"
personline_s.append(Line())
personline_s[-1].xValues = orgline_s.xValues
personline_s[-1].yValues = p_eval_data[rtime_frame:]
personline_s[-1].lineWidth = 2
personline_s[-1].color = "#666666"
personplot[-1].add(personline_s[-1])
        # Add the threshold line and its label
tpersonline.append(Line())
tpersonline[-1].xValues = dateidx
tpersonline[-1].yValues = [people[idx].alarm_threshold]*pinterval
tpersonline[-1].lineStyle = "--"
tpersonline[-1].color = 'r'
personplot[-1].add(tpersonline[-1])
tplabel.append(Label(len(dateidx)/7, people[idx].alarm_threshold + ((max(rorg_eval_data) + 10)/50),
"Umbral personal (" + str(people[idx].alarm_threshold) + ")"))
personplot[-1].add(tplabel[-1])
        # blue fill over the analysed interval, below the threshold
personline_fill_u.append(StackedLines())
personline_fill_u1.append(Line())
personline_fill_u1[-1].xValues = dateidx[rtime_frame:]
temp = p_eval_data[rtime_frame:]
for i in range(len(temp)):
if temp[i] > people[idx].alarm_threshold:
temp[i] = people[idx].alarm_threshold
personline_fill_u1[-1].yValues = temp
personline_fill_u1[-1].lineWidth = 0
personline_fill_u[-1].addLine(personline_fill_u1[-1], color="#85C2FF")
personplot[-1].add(personline_fill_u[-1])
        # Add the vertical line that marks the analysed interval
vline.append(VLine())
vline[-1].xValues = \
[dateidx[datelist.index(c)] for c in datelist if
(c.minute == (rtime - timedelta(minutes=rinterval - 1)).minute
and c.hour == (rtime - timedelta(minutes=rinterval - 1)).hour)]
vline[-1].color = 'b'
vline[-1].lineStyle = ":"
personplot[-1].add(vline[-1])
pplugin = [p[8:] for p in plugin_dir]
for ds in people[idx].datasources: # Se eliminan las etiquetas de plugins desactivados
if not ds.enabled:
for p in plugin_dir:
if str(ds).count(p):
pplugin.pop(pplugin.index(p[8:]))
t = len(pplugin)
p_eval_data_polar = []
for j in range(len(people[idx].datasources)):
if people[idx].datasources[j].enabled:
p_eval_data_polar.append(max(people[idx].datasources[j].eval_data(rtime_frame, rinterval, rtime, rips)))
        # Draw the radar-style projection
pradarplot.append(Plot())
pradarplot[-1].projection = 'polar'
pradarplot[-1].title = u"Valor máximo por origen de detección\n" + people[idx].name
pradarplot[-1].yLimits = (0, max(rorg_eval_data_polar) + 2)
pradarplot[-1].grid.color = "#A1A1A1"
pradarplot[-1].grid.visible = True
pradarplot[-1].grid.style = "--"
plineradar.append(Line())
plineradar[-1].yValues = p_eval_data_polar + [p_eval_data_polar[0]]
plineradar[-1].xValues = [(2*math.pi/t)*x for x in range(t)] + [2*math.pi]
plineradar[-1].xTickLabelPoints = [(2*math.pi/t)*x for x in range(t)]
plineradar[-1].xTickLabels = pplugin
plineradar[-1].xTickLabelProperties = {"color": "#006600", "alpha": 0.8}
plineradar[-1].lineWidth = 2
plineradar[-1].color = "r"
pradarscat.append(Scatter())
pradarscat[-1].xValues = plineradar[-1].xValues
pradarscat[-1].yValues = plineradar[-1].yValues
pradarscat[-1].markerSize = 25
pradarscat[-1].marker = "s"
pradarplot[-1].add(plineradar[-1])
pradarplot[-1].add(pradarscat[-1])
personplot[-1].setDimensions(8, 5, dpi=75)
pradarplot[-1].setDimensions(5, 5, dpi=50)
personplot[-1].save("temp/imgchart_" + people[idx].person + ".png")
pradarplot[-1].save("temp/imgradar_" + people[idx].person + ".png")
def save_org_report(rtime_frame, rinterval, rtime, rips, people, org_alarm_threshold, plugin_dir, filenamesave):
"""
    Generates a footprinting event report for the organisation
"""
with open("resources/mail/orgreporttemplate.html", 'r') as f:
orghtml = f.read()
detected_persons = 0
person_index = []
rorg_eval_data = [0] * rinterval
for rperson in people:
rperson.eval_data(rtime_frame, rinterval, rtime, rips)
if rperson.detection:
person_index.append(people.index(rperson))
detected_persons += 1
for j in range(rinterval):
rorg_eval_data[j] += rperson.detection_data[j]
prev_rorg_eval_data = [0] * rinterval
for rperson in people:
rperson.eval_data(rtime_frame, rinterval, rtime - timedelta(minutes=rinterval), rips)
if rperson.detection:
for j in range(rinterval):
prev_rorg_eval_data[j] += rperson.detection_data[j]
orghtml = orghtml.replace('-ORGTHRESHOLD-', str(org_alarm_threshold))
if max(rorg_eval_data) >= org_alarm_threshold:
orghtml = orghtml.replace('-TITLE-', "Alarma de Footprinting")
orghtml = tagdelete("NOALARM", orghtml)
        if max(prev_rorg_eval_data) < org_alarm_threshold:  # New detection
orghtml = tagdelete("ALARMUP", orghtml)
orghtml = tagdelete("ALARMDOWN", orghtml)
orghtml = tagdelete("ALARMSTABLE", orghtml)
orghtml = orghtml.replace('-CHECKINTERVAL-', str(rinterval))
orghtml = orghtml.replace('-LEVELMAX-', str(max(rorg_eval_data)))
levelmaxtime = rtime + timedelta(minutes=rorg_eval_data.index(max(rorg_eval_data)) - rinterval)
orghtml = orghtml.replace('-LEVELMAXTIME-', levelmaxtime.strftime("%H:%M"))
idxtt = 0
for data in rorg_eval_data:
if data > org_alarm_threshold:
idxtt = data
break
timethreshold = rtime + timedelta(minutes=rorg_eval_data.index(idxtt) - rinterval)
orghtml = orghtml.replace('-TIMETHRESHOLD-', timethreshold.strftime("%H:%M"))
        elif rorg_eval_data[-1] >= org_alarm_threshold:  # Alarm continues
orghtml = tagdelete("NEWALARM", orghtml)
orghtml = tagdelete("ALARMDOWN", orghtml)
if rorg_eval_data[-1] > prev_rorg_eval_data[-1]:
orghtml = tagdelete("ALARMSTABLE", orghtml)
else:
orghtml = tagdelete("ALARMUP", orghtml)
orghtml = orghtml.replace('-CHECKINTERVAL-', str(rinterval))
orghtml = orghtml.replace('-LASTLEVEL-', str(rorg_eval_data[-1]))
        elif rorg_eval_data[-1] < org_alarm_threshold:  # Alarm ends
orghtml = tagdelete("ALARMUP", orghtml)
orghtml = tagdelete("NEWALARM", orghtml)
orghtml = tagdelete("ALARMSTABLE", orghtml)
orghtml = tagdelete("RUNNINGFOOTPRINTING", orghtml)
idxtt = 0
for data in rorg_eval_data[::-1]:
if data >= org_alarm_threshold:
idxtt = data
break
leveldown = rtime + timedelta(minutes=rorg_eval_data.index(idxtt) - rinterval)
orghtml = orghtml.replace('-LEVELDOWN-', leveldown.strftime("%H:%M"))
else:
orghtml = orghtml.replace('-TITLE-', "Informe de Footprinting")
orghtml = tagdelete("ALARM", orghtml)
orghtml = orghtml.replace('-DATEMIN-', (rtime - timedelta(minutes=rinterval)).strftime("%H:%M"))
orghtml = orghtml.replace('-DATEMAX-', rtime.strftime("%H:%M"))
orghtml = orghtml.replace('-ORGCHART-', "imgchart_org.png")
orghtml = orghtml.replace('-ORGRADAR-', "imgradar_org.png")
orghtml = orghtml.replace('-ONUMPER-', str(detected_persons))
rorg_eval_data_polar = [0]*len(plugin_dir)
for i in person_index:
for j in range(len(plugin_dir)):
rorg_eval_data_polar[j] += max(people[i].datasources[j].eval_data(rtime_frame, rinterval, rtime, rips))
oplugin = plugin_dir[rorg_eval_data_polar.index(max(rorg_eval_data_polar))]
orghtml = orghtml.replace('-OPLUGIN-', oplugin[8:])
orghtml = orghtml.replace('-ONUMIP-', str(len(rips)))
onumsem = len([a for a in rorg_eval_data_polar if a > 0])
orghtml = orghtml.replace('-ONUMSEN-', str(onumsem))
    # Iterate over each person
p1 = orghtml.index("<!--PERSON-->")
p2 = orghtml.index("<!--/PERSON-->")
persontemplate = orghtml[p1:p2+14]
personhtml = ''
for idx in person_index:
htmltemp = persontemplate
htmltemp = htmltemp.replace('-USERNAME-', people[idx].name.encode('ascii', 'xmlcharrefreplace'))
htmltemp = htmltemp.replace('-USERCHART-', 'imgchart_' + people[idx].person + '.png')
htmltemp = htmltemp.replace('-USERRADAR-', 'imgradar_' + people[idx].person + '.png')
pplugin = [p[8:] for p in plugin_dir]
        for ds in people[idx].datasources:  # Remove the labels of disabled plugins
if not ds.enabled:
for p in plugin_dir:
if str(ds).count(p):
pplugin.pop(pplugin.index(p[8:]))
p_eval_data_polar = []
for j in range(len(people[idx].datasources)):
if people[idx].datasources[j].enabled:
p_eval_data_polar.append(max(people[idx].datasources[j].eval_data(rtime_frame, rinterval, rtime, rips)))
uplugin = pplugin[p_eval_data_polar.index(max(p_eval_data_polar))]
htmltemp = htmltemp.replace('-UPLUGIN-', uplugin)
unumsem = len([a for a in p_eval_data_polar if a > 0])
htmltemp = htmltemp.replace('-UNUMSEN-', str(unumsem))
people[idx].eval_data(rtime_frame, rinterval, rtime, rips)
if people[idx].alarmed:
if not people[idx].notify:
htmltemp = tagdelete("UNOTIFY", htmltemp)
else:
htmltemp = htmltemp.replace('-UMAIL-', people[idx].email.encode('ascii', 'xmlcharrefreplace'))
else:
htmltemp = tagdelete("UALARMED", htmltemp)
pips = set([d[0] for d in people[idx].get_ips(rinterval + rtime_frame, rtime)])
if pips:
unumip = len(pips.intersection(set(rips)))
else:
unumip = 0
htmltemp = htmltemp.replace('-UNUMIP-', str(unumip))
personhtml += htmltemp
orghtml = orghtml.replace(persontemplate, personhtml)
    # Generate the report text
report_data = []
for idx in person_index:
report_data += people[idx].get_report_data(rinterval + rtime_frame, rtime, rips)
    report_data = sorted(report_data, key=itemgetter(0))  # Sort by date and time
p1 = orghtml.index("<!--DATAROW-->")
p2 = orghtml.index("<!--/DATAROW-->")
htmlrow = orghtml[p1:p2+15]
p1 = orghtml.index("<!--ALTDATAROW-->")
p2 = orghtml.index("<!--/ALTDATAROW-->")
htmlaltrow = orghtml[p1:p2+18]
rawdata = pygeoip.GeoIP('resources/geoip/GeoLiteCity.dat')
htmltable = ""
noalt = True
for data in report_data:
if noalt:
datarow = htmlrow
else:
datarow = htmlaltrow
datarow = datarow.replace('-EHOUR-', data[0].strftime("%H:%M"))
try:
hostname = str(socket.gethostbyaddr(data[1])[0])
except:
hostname = data[1]
datarow = datarow.replace('-EIP-', hostname)
datarow = datarow.replace('-EDESCRIPT-', data[2].encode('ascii', 'xmlcharrefreplace'))
datarow = datarow.replace('-EPLUGIN-', data[3])
datarow = datarow.replace('-EPERSON-', data[4].encode('ascii', 'xmlcharrefreplace'))
try:
ipdata = rawdata.record_by_name(data[1])
country = ipdata['country_name']
city = ipdata['city']
iplocation = (city + ", " + country).encode('ascii', 'xmlcharrefreplace')
except:
iplocation = "Desconocida"
datarow = datarow.replace('-EGEOIP-', iplocation)
htmltable += datarow
noalt = not noalt
orghtml = tagdelete("DATAROW", orghtml)
orghtml = tagreplace("ALTDATAROW", orghtml, htmltable)
with open(filenamesave, 'w') as f:
orghtml = orghtml.decode('utf8', 'xmlcharrefreplace')
f.write(orghtml.encode('ascii', 'xmlcharrefreplace'))
def save_person_report(rtime_frame, rinterval, rtime, rips, rperson, plugin_dir, filenamesave):
"""
    Generates a footprinting event report for a single person
"""
with open("resources/mail/personreporttemplate.html", 'r') as f:
personhtml = f.read()
rperson.eval_data(rtime_frame, rinterval, rtime, rips)
person_eval_data = rperson.detection_data
rperson.eval_data(rtime_frame, rinterval, rtime - timedelta(minutes=rinterval), rips)
prev_person_eval_data = rperson.detection_data
personhtml = personhtml.replace('-ORGTHRESHOLD-', str(rperson.alarm_threshold))
personhtml = personhtml.replace('-USERNAME-', rperson.name.encode('ascii', 'xmlcharrefreplace'))
if max(person_eval_data) >= rperson.alarm_threshold:
personhtml = personhtml.replace('-TITLE-', "Alarma de Footprinting")
personhtml = tagdelete("NOALARM", personhtml)
        if max(prev_person_eval_data) < rperson.alarm_threshold:  # New detection
personhtml = tagdelete("ALARMUP", personhtml)
personhtml = tagdelete("ALARMDOWN", personhtml)
personhtml = tagdelete("ALARMSTABLE", personhtml)
personhtml = personhtml.replace('-CHECKINTERVAL-', str(rinterval))
personhtml = personhtml.replace('-LEVELMAX-', str(max(person_eval_data)))
levelmaxtime = rtime + timedelta(minutes=person_eval_data.index(max(person_eval_data)) - rinterval)
personhtml = personhtml.replace('-LEVELMAXTIME-', levelmaxtime.strftime("%H:%M"))
idxtt = 0
for data in person_eval_data:
if data > rperson.alarm_threshold:
idxtt = data
break
timethreshold = rtime + timedelta(minutes=person_eval_data.index(idxtt) - rinterval)
personhtml = personhtml.replace('-TIMETHRESHOLD-', timethreshold.strftime("%H:%M"))
        elif person_eval_data[-1] >= rperson.alarm_threshold:  # Alarm continues
personhtml = tagdelete("NEWALARM", personhtml)
personhtml = tagdelete("ALARMDOWN", personhtml)
if person_eval_data[-1] > prev_person_eval_data[-1]:
personhtml = tagdelete("ALARMSTABLE", personhtml)
else:
personhtml = tagdelete("ALARMUP", personhtml)
personhtml = personhtml.replace('-CHECKINTERVAL-', str(rinterval))
personhtml = personhtml.replace('-LASTLEVEL-', str(person_eval_data[-1]))
        elif person_eval_data[-1] < rperson.alarm_threshold:  # Alarm ends
personhtml = tagdelete("ALARMUP", personhtml)
personhtml = tagdelete("NEWALARM", personhtml)
personhtml = tagdelete("ALARMSTABLE", personhtml)
personhtml = tagdelete("RUNNINGFOOTPRINTING", personhtml)
idxtt = 0
for data in person_eval_data[::-1]:
if data >= rperson.alarm_threshold:
idxtt = data
break
leveldown = rtime + timedelta(minutes=person_eval_data.index(idxtt) - rinterval)
personhtml = personhtml.replace('-LEVELDOWN-', leveldown.strftime("%H:%M"))
else:
personhtml = personhtml.replace('-TITLE-', "Informe de Footprinting")
personhtml = tagdelete("ALARM", personhtml)
personhtml = personhtml.replace('-DATEMIN-', (rtime - timedelta(minutes=rinterval)).strftime("%H:%M"))
personhtml = personhtml.replace('-DATEMAX-', rtime.strftime("%H:%M"))
personhtml = personhtml.replace('-USERCHART-', 'imgchart_' + rperson.person + '.png')
personhtml = personhtml.replace('-USERRADAR-', 'imgradar_' + rperson.person + '.png')
pplugin = [p[8:] for p in plugin_dir]
    for ds in rperson.datasources:  # Remove the labels of disabled plugins
if not ds.enabled:
for p in plugin_dir:
if str(ds).count(p):
pplugin.pop(pplugin.index(p[8:]))
p_eval_data_polar = []
for j in range(len(rperson.datasources)):
if rperson.datasources[j].enabled:
p_eval_data_polar.append(max(rperson.datasources[j].eval_data(rtime_frame, rinterval, rtime, rips)))
uplugin = pplugin[p_eval_data_polar.index(max(p_eval_data_polar))]
personhtml = personhtml.replace('-UPLUGIN-', uplugin)
unumsem = len([a for a in p_eval_data_polar if a > 0])
personhtml = personhtml.replace('-UNUMSEN-', str(unumsem))
rperson.eval_data(rtime_frame, rinterval, rtime, rips)
pips = set([d[0] for d in rperson.get_ips(rinterval + rtime_frame, rtime)])
if pips:
unumip = len(pips.intersection(set(rips)))
else:
unumip = 0
personhtml = personhtml.replace('-UNUMIP-', str(unumip))
    # Generate the report text
report_data = rperson.get_report_data(rinterval + rtime_frame, rtime, rips)
p1 = personhtml.index("<!--DATAROW-->")
p2 = personhtml.index("<!--/DATAROW-->")
htmlrow = personhtml[p1:p2+15]
p1 = personhtml.index("<!--ALTDATAROW-->")
p2 = personhtml.index("<!--/ALTDATAROW-->")
htmlaltrow = personhtml[p1:p2+18]
rawdata = pygeoip.GeoIP('resources/geoip/GeoLiteCity.dat')
htmltable = ""
noalt = True
for data in report_data:
if noalt:
datarow = htmlrow
else:
datarow = htmlaltrow
datarow = datarow.replace('-EHOUR-', data[0].strftime("%H:%M"))
try:
hostname = str(socket.gethostbyaddr(data[1])[0])
except:
hostname = data[1]
datarow = datarow.replace('-EIP-', hostname)
datarow = datarow.replace('-EDESCRIPT-', data[2].encode('ascii', 'xmlcharrefreplace'))
datarow = datarow.replace('-EPLUGIN-', data[3])
try:
ipdata = rawdata.record_by_name(data[1])
country = ipdata['country_name']
city = ipdata['city']
iplocation = (city + ", " + country).encode('ascii', 'xmlcharrefreplace')
except:
iplocation = "Desconocida"
datarow = datarow.replace('-EGEOIP-', iplocation)
htmltable += datarow
noalt = not noalt
personhtml = tagdelete("DATAROW", personhtml)
personhtml = tagreplace("ALTDATAROW", personhtml, htmltable)
with open(filenamesave, 'w') as f:
personhtml = personhtml.decode('utf8', 'xmlcharrefreplace')
f.write(personhtml.encode('ascii', 'xmlcharrefreplace'))
def send_report_mail(mailto, filename, subject):
"""
    Sends the html file filename (the report) to the given email address. The images are embedded in the
    message (they must be in the same folder as filename). A plain-text version of the report is also
    generated for mail clients that do not support html.
"""
logger = logging.getLogger('report.watched_pages')
parser = RawConfigParser()
with codecs.open('config/sneaks.conf', 'r', encoding='utf-8') as f:
parser.readfp(f)
smtp_email = ''
smtp_server = ''
smtp_port = 0
smtp_user = ''
smtp_pwd = ''
if parser.has_option('general', 'smtp_email'):
smtp_email = parser.get('general', 'smtp_email')
if not smtp_email:
logger.critical('Error en sneaks.conf: smtp_email')
exit(1)
if parser.has_option('general', 'smtp_server'):
smtp_server = parser.get('general', 'smtp_server')
if not smtp_server:
logger.critical('Error en sneaks.conf: smtp_server')
exit(1)
if parser.has_option('general', 'smtp_port'):
smtp_port = parser.getint('general', 'smtp_port')
if not smtp_port:
logger.critical('Error en sneaks.conf: smtp_port')
exit(1)
if parser.has_option('general', 'smtp_user'):
smtp_user = parser.get('general', 'smtp_user')
if not smtp_user:
logger.critical('Error en sneaks.conf: smtp_user')
exit(1)
if parser.has_option('general', 'smtp_pwd'):
smtp_pwd = parser.get('general', 'smtp_pwd')
if not smtp_pwd:
logger.critical('Error en sneaks.conf: smtp_pwd')
exit(1)
with open(filename, 'r') as f:
orghtml = f.read()
orgtxt = html2text.html2text(orghtml)
msgroot = MIMEMultipart('related')
msgroot['Subject'] = subject
msgroot['From'] = smtp_email
msgroot['To'] = mailto
msgroot.preamble = 'This is a multi-part message in MIME format.'
# Encapsulate the plain and HTML versions of the message body in an
# 'alternative' part, so message agents can decide which they want to display.
msgalternative = MIMEMultipart('alternative')
msgroot.attach(msgalternative)
msgtext = MIMEText(orgtxt.encode('ascii', 'xmlcharrefreplace'))
msgalternative.attach(msgtext)
pattern = re.compile(r"img\w+.png")
images = pattern.findall(orghtml)
msgimages = []
for image in images:
orghtml = orghtml.replace(image, "cid:" + image, 1)
fp = open("temp/" + image, 'rb')
msgimages.append(MIMEImage(fp.read()))
fp.close()
for i in range(len(images)):
msgimages[i].add_header('Content-ID', "<" + images[i] + ">")
msgroot.attach(msgimages[i])
msgtext = MIMEText(orghtml, 'html')
msgalternative.attach(msgtext)
# Send the email (this example assumes SMTP authentication is required)
smtp = smtplib.SMTP(smtp_server, smtp_port)
try:
smtp.ehlo()
# If we can encrypt this session, do it
if smtp.has_extn('STARTTLS'):
smtp.starttls()
smtp.ehlo() # re-identify ourselves over TLS connection
smtp.login(smtp_user, smtp_pwd)
smtp.sendmail(smtp_email, mailto, msgroot.as_string())
finally:
smtp.quit()
| gpl-2.0 | -1,516,157,296,209,164,800 | 39.462842 | 120 | 0.619816 | false | 3.235846 | false | false | false |
rcbuild-info/scrape | rcbi/rcbi/spiders/MultirotorSuperstore.py | 1 | 3983 | import scrapy
from scrapy import log
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from rcbi.items import Part
import copy
import os
import urlparse
import urllib
MANUFACTURERS = ["Cobra", "Dinogy", "SkyRC", "DYS", "HQProp", "iPower", "Tattu", "GemFan", "SunnySky", "Emax", "ZTW", "MS", "FrSky", "RCTimer", "TBS", "VAS", "DTF UHF", "Pololu", "ImmersionRC", "Hovership", "FatShark", "Hawkeye", "Brotronics", "Argonaut", "3DR", "Tarot", "SkyZone", "Shendrones", "Revolectrix", "Flying Cinema", "Airbot", "Circular Wireless"]
CORRECT = {"GemFan": "Gemfan", "SunnySky": "Sunnysky", "Emax": "EMAX", "MS": "MultirotorSuperstore", "TBS": "Team BlackSheep", "VAS": "Video Aerial Systems", "3DR": "3DRobotics", "SkyZone": "Skyzone", "ShenDrones": "Shendrones"}
NEW_PREFIX = {}
STOCK_STATE_MAP = {"http://schema.org/InStock": "in_stock",
"http://schema.org/OutOfStock": "out_of_stock"}
class MultirotorSuperstoreSpider(CrawlSpider):
name = "multirotorsuperstore"
allowed_domains = ["multirotorsuperstore.com"]
start_urls = ["http://www.multirotorsuperstore.com/"]
rules = (
Rule(LinkExtractor(restrict_css=[".submenu", ".pages"])),
Rule(LinkExtractor(restrict_css=".category-products"), callback='parse_item'),
)
def parse_item(self, response):
item = Part()
item["site"] = self.name
product_name = response.css(".product-name")
if not product_name:
return
item["name"] = product_name[0].xpath("//h1/text()").extract()[0]
variant = {}
item["variants"] = [variant]
parsed = urlparse.urlparse(response.url)
filename = "/" + os.path.basename(parsed[2])
variant["url"] = urlparse.urlunparse((parsed[0], parsed[1], filename,
parsed[3], parsed[4], parsed[5]))
for m in MANUFACTURERS:
if item["name"].startswith(m):
item["name"] = item["name"][len(m):].strip("- ")
item["manufacturer"] = m
break
if "manufacturer" in item:
m = item["manufacturer"]
if m in NEW_PREFIX:
item["name"] = NEW_PREFIX[m] + " " + item["name"]
if m in CORRECT:
item["manufacturer"] = CORRECT[m]
superproduct = response.css("#super-product-table")
if not superproduct:
availability = response.css("[itemprop=\"availability\"]::attr(href)")
if availability and availability.extract_first() in STOCK_STATE_MAP:
variant["stock_state"] = STOCK_STATE_MAP[availability.extract_first()]
variant["stock_text"] = response.css(".availability>span::text").extract_first().strip()
elif availability:
print(availability)
price = response.css(".product-essential .regular-price .price::text")
if price:
special = response.css(".product-essential .special-price .price::text")
if special:
variant["price"] = special.extract_first().strip()
else:
variant["price"] = price.extract_first().strip()
else:
subproducts = superproduct.css("tbody>tr")
first = True
in_stock = response.css(".product-essential .in-stock")
if not in_stock:
variant["stock_state"] = "out_of_stock"
for subproduct in subproducts:
cols = subproduct.css("td")
if first:
first = False
else:
variant = copy.deepcopy(variant)
item["variants"].append(variant)
variant["description"] = cols[0].css("::text").extract_first().strip()
if in_stock:
quantity_field = cols[2].css("input")
if quantity_field:
variant["stock_state"] = "in_stock"
else:
variant["stock_state"] = "out_of_stock"
# Do price last so we can copy for tiered pricing.
price = cols[1].css(".regular-price .price::text")
if price:
variant["price"] = price.extract_first().strip()
# TODO(tannewt): Support tiered pricing.
return item
| apache-2.0 | 1,018,846,798,764,537,900 | 38.435644 | 359 | 0.614361 | false | 3.415952 | false | false | false |
mchung94/solitaire-player | pysolvers/solvers/deck.py | 1 | 2238 | """Card and Deck definitions.
Cards are strings containing a rank character followed by a suit character,
because it's simpler than defining a class or named tuple while still being
immutable, hashable, easy to create, and human-readable.
I also want to define a deck as just a tuple of cards that contain exactly
all 52 cards in a standard deck. I think this is the simplest way with the
fewest surprises/pitfalls.
"""
import collections
RANKS = 'A23456789TJQK'
SUITS = 'cdhs'
CARDS = [f'{rank}{suit}' for suit in SUITS for rank in RANKS]
CARDS_SET = set(CARDS)
def is_card(obj):
"""Return true if the object is a card."""
return obj in CARDS_SET
def card_rank(card):
"""Return the card's rank as a character."""
return card[0]
def malformed_cards(tuple_of_objects):
"""Return a list of the objects in the tuple that aren't cards.
If is_standard_deck() returns false for a list, this function may help the
caller determine what's wrong with their deck of cards."""
return [obj for obj in tuple_of_objects if not is_card(obj)]
def missing_cards(tuple_of_cards):
"""Return a list of the standard cards that are missing from the tuple.
Return the missing cards in consistent order by suit and rank.
If is_standard_deck() returns false for a list, this function may help the
caller determine what's wrong with their deck of cards."""
cards = set(tuple_of_cards)
return [card for card in CARDS if card not in cards]
def duplicate_cards(tuple_of_cards):
"""Return a list of the cards that are duplicated in the tuple.
If a card is duplicated N times, the card should be in the result N times
so that the caller knows how many times it's been duplicated.
If is_standard_deck() returns false for a list, this function may help the
caller determine what's wrong with their deck of cards."""
c = collections.Counter(tuple_of_cards)
return [card for card in tuple_of_cards if c[card] > 1]
def is_standard_deck(tuple_of_cards):
"""Return true if the tuple of cards is a standard 52-card deck."""
if not isinstance(tuple_of_cards, tuple):
return False
return len(tuple_of_cards) == 52 and not missing_cards(tuple_of_cards)
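# Illustrative usage sketch (an editor's addition, not part of the original
# module): exercise the validation helpers on a good deck and on a
# deliberately broken one.
if __name__ == '__main__':
    standard = tuple(CARDS)
    print(is_standard_deck(standard))           # True
    broken = standard[:-1] + ('Xx',)            # drop 'Ks' and add a junk entry
    print(malformed_cards(broken))              # ['Xx']
    print(missing_cards(broken))                # ['Ks']
    print(duplicate_cards(standard + ('Ac',)))  # ['Ac', 'Ac']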
| mit | -3,498,415,241,772,121,600 | 32.402985 | 78 | 0.714924 | false | 3.705298 | false | false | false |
vlegoff/tsunami | src/secondaires/navigation/chantier_naval.py | 1 | 4209 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la classe ChantierNaval, détaillée plus bas."""
from abstraits.obase import BaseObj
from secondaires.navigation.commande_chantier import *
class ChantierNaval(BaseObj):
"""Classe décrivant un chantier naval.
Un chantier naval est un ensemble de salles que l'on peut utiliser pour
la réparation et la personnalisation d'un navire en particulier. Un
chantier naval possède une salle d'interaction (nommée 'salle_magasin')
et des points d'occupation qui déterminent le lieu des bassins. Si le
navire souhaité n'est pas dans le bassin d'un chantier, le chantier ne
pourra pas travailler dessus.
"""
enregistrer = True
_nom = "chantier_navale"
_version = 1
def __init__(self, cle):
BaseObj.__init__(self)
self.cle = cle
self.salle_magasin = None
self.etendue = None
self.points = []
self.commandes = []
self.cales_seches = []
self._construire()
def __getnewargs__(self):
return ("inconnu", )
def __repr__(self):
return "<ChantierNaval {}>".format(repr(self.cle))
def __str__(self):
return self.cle
def ajouter_commande(self, instigateur, navire, nom_type, duree, *args):
"""Ajout d'une nouvelle commande.
Les paramètres à préciser sont :
instigateur -- le personnage ayant ordonné la commande
navire -- le navire concerné
nom_type -- le type de la commande
duree -- la durée de la commande (en minutes)
*args -- les arguments supplémentaire soptionnels propres au type.
"""
commande = CommandeChantierNaval(self, instigateur, navire, nom_type,
duree, *args)
self.commandes.append(commande)
def get_navires_possedes(self, personnage):
"""Retourne les navires dans le chantier naval."""
navires = [n for n in importeur.navigation.navires.values() if \
n.proprietaire is personnage]
navires = [n for n in navires if n.etendue]
navires = [n for n in navires if (int(n.position.x),
int(n.position.y), int(n.position.z)) in self.points]
navires.sort(key=lambda n: n.cle)
return navires
def executer_commandes(self):
"""Exécute les commandes à faire."""
for commande in list(self.commandes):
if commande.a_faire:
try:
commande.executer()
except CommandeInterrompue:
pass
else:
self.commandes.remove(commande)
| bsd-3-clause | 7,937,201,013,783,846,000 | 38.54717 | 79 | 0.673903 | false | 3.638889 | false | false | false |
bhaveshAn/crisscross | crisscross/facades/email.py | 1 | 1397 | '''
Email
=====
The :class:`Email` facade provides access to the device's email
functionality.
.. note::
On Android `INTERNET` permission is needed.
Simple Examples
---------------
To send an e-mail::
>>> from crisscross import email
>>> recipient = '[email protected]'
>>> subject = 'Hi'
>>> text = 'This is an example.'
>>> create_chooser = False
>>> email.send(recipient=recipient, subject=subject, text=text,
create_chooser=create_chooser)
>>> # opens email interface where user can change the content.
'''
class Email(object):
'''
Email facade.
'''
def send(self, recipient=None, subject=None, text=None,
create_chooser=None):
'''
Open an email client message send window, prepopulated with the
given arguments.
:param recipient: Recipient of the message (str)
:param subject: Subject of the message (str)
:param text: Main body of the message (str)
:param create_chooser: Whether to display a program chooser to
handle the message (bool)
.. note:: create_chooser is only supported on Android
'''
self._send(recipient=recipient, subject=subject, text=text,
create_chooser=create_chooser)
# private
def _send(self, **kwargs):
raise NotImplementedError()
| mit | 5,783,431,339,681,985,000 | 24.87037 | 73 | 0.601288 | false | 4.352025 | false | false | false |
jvivian/rnaseq-lib | src/rnaseq_lib/utils/expando.py | 1 | 2336 | # Taken from: https://github.com/BD2KGenomics/bd2k-python-lib
class Expando(dict):
"""
    Pass initial attributes to the constructor:
>>> o = Expando(foo=42)
>>> o.foo
42
Dynamically create new attributes:
>>> o.bar = 'hi'
>>> o.bar
'hi'
Expando is a dictionary:
>>> isinstance(o,dict)
True
>>> o['foo']
42
Works great with JSON:
>>> import json
>>> s='{"foo":42}'
>>> o = json.loads(s,object_hook=Expando)
>>> o
{u'foo': 42}
>>> o.foo
42
>>> o.bar = 'hi'
>>> o
{u'foo': 42, 'bar': 'hi'}
And since Expando is a dict, it serializes back to JSON just fine:
>>> json.dumps(o)
'{"foo": 42, "bar": "hi"}'
Attributes can be deleted, too:
>>> o = Expando(foo=42)
>>> o.foo
42
>>> del o.foo
>>> o.foo
Traceback (most recent call last):
...
AttributeError: 'Expando' object has no attribute 'foo'
>>> o['foo']
Traceback (most recent call last):
...
KeyError: 'foo'
>>> del o.foo
Traceback (most recent call last):
...
AttributeError: foo
And copied:
>>> o = Expando(foo=42)
>>> p = o.copy()
>>> isinstance(p,Expando)
True
>>> o == p
True
>>> o is p
False
Same with MagicExpando ...
>>> o = MagicExpando()
>>> o.foo.bar = 42
>>> p = o.copy()
>>> isinstance(p,MagicExpando)
True
>>> o == p
True
>>> o is p
False
... but the copy is shallow:
>>> o.foo is p.foo
True
"""
def __init__(self, *args, **kwargs):
super(Expando, self).__init__(*args, **kwargs)
self.__slots__ = None
self.__dict__ = self
def copy(self):
return type(self)(self)
class MagicExpando(Expando):
"""
Use MagicExpando for chained attribute access. The first time a missing attribute is
accessed, it will be set to a new child MagicExpando.
>>> o=MagicExpando()
>>> o.foo = 42
>>> o
{'foo': 42}
>>> o.bar.hello = 'hi'
>>> o
{'foo': 42, 'bar': {'hello': 'hi'}}
"""
def __getattribute__(self, name):
try:
return super(Expando, self).__getattribute__(name)
except AttributeError:
child = self.__class__()
self[name] = child
return child
| mit | -5,991,703,706,041,165,000 | 18.305785 | 88 | 0.51113 | false | 3.481371 | false | false | false |
QualiSystems/OpenStack-Shell | package/tests/test_cp/test_openstack/test_command/test_operations/test_connectivity_operation.py | 1 | 1574 | from unittest import TestCase
from mock import Mock
from cloudshell.cp.openstack.command.operations.connectivity_operation import ConnectivityOperation
class TestConnectivityOperation(TestCase):
def setUp(self):
self.connectivity_service = Mock()
self.conn_operation = ConnectivityOperation(connectivity_service=self.connectivity_service)
self.conn_operation.connectivity_service = Mock()
self.os_session = Mock()
self.cp_resource_model = Mock()
self.logger = Mock()
pass
def test_connectivity_operation_apply_connectivity(self):
connectivity_request = Mock()
mock_result = Mock()
#self.conn_operation.apply_connectivity = Mock(return_value=mock_result)
self.conn_operation.apply_connectivity(openstack_session=self.os_session,
cp_resource_model=self.cp_resource_model,
conn_request=connectivity_request,
logger=self.logger)
self.conn_operation.connectivity_service.perform_apply_connectivity.assert_called_with(
openstack_session=self.os_session,
cp_resource_model=self.cp_resource_model,
connection_request=connectivity_request,
logger=self.logger)
| isc | -8,626,905,349,689,318,000 | 49.774194 | 109 | 0.551461 | false | 5.503497 | false | false | false |
gtalarico/pyrevitplus | pyRevitPlus.tab/VP Tools.panel/Levels.pulldown/Save Levels.pushbutton/script.py | 1 | 4676 | """
Save Levels
Save the view dependent properties -
endpoint locations, level heads and leaders
of the selected building levels for re-use
Non-level elements will be skipped with a dialog,
so it's advisable to apply filtering beforehand
TESTED REVIT API: 2020
@ejs-ejs
This script is part of PyRevitPlus: Extensions for PyRevit
github.com/ejs-ejs | @ejs-ejs
--------------------------------------------------------
RevitPythonWrapper: revitpythonwrapper.readthedocs.io
pyRevit: github.com/eirannejad/pyRevit
"""
import os
import pickle
from tempfile import gettempdir
from collections import namedtuple
import rpw
from rpw import doc, uidoc, DB, UI
Point = namedtuple('Point', ['X', 'Y','Z'])
Axis = namedtuple('Axis', ['Name', 'Start', 'End','StartBubble', 'EndBubble', 'StartBubbleVisible', 'EndBubbleVisible'])
tempfile = os.path.join(gettempdir(), 'LevelPlacement')
cView = doc.ActiveView
if not(cView.ViewType == DB.ViewType.Section or cView.ViewType == DB.ViewType.Elevation):
UI.TaskDialog.Show('pyRevitPlus', 'View type \'{}\' not supported'.format(cView.ViewType))
else:
experimental = True
UI.TaskDialog.Show('pyRevitPlus', 'Support for \'{}\' view type is experimental!'.format(cView.ViewType))
selection = rpw.ui.Selection()
#if len(selection) <> 1:
# UI.TaskDialog.Show('pyRevitPlus', 'Select a single grid line!')
# exit(0);
n=0
LevelLines = dict()
for cLevel in selection:
el = cLevel.unwrap()
if isinstance(el, DB.Level):
curves=el.GetCurvesInView(DB.DatumExtentType.ViewSpecific, cView)
if len(curves) <> 1:
                UI.TaskDialog.Show('pyRevitPlus', 'The level line is defined by {} curves, unable to proceed'.format(len(curves)))
else:
cLevelLine = {'Name':'', 'Start': Point(0,0,0), 'End': Point(0,0,0), 'StartBubble': False, 'StartBubbleVisible': False, 'EndBubble': False, 'EndBubbleVisible': False}
cCurve = curves[0]
leader0 = el.GetLeader(DB.DatumEnds.End0, cView)
if leader0:
tmp = leader0.Elbow
cLevelLine['Leader0Elbow'] = Point(tmp.X, tmp.Y,tmp.Z)
tmp = leader0.End
cLevelLine['Leader0End'] = Point(tmp.X, tmp.Y,tmp.Z)
tmp = leader0.Anchor
cLevelLine['Leader0Anchor'] = Point(tmp.X, tmp.Y,tmp.Z)
leader1 = el.GetLeader(DB.DatumEnds.End1, cView)
if leader1:
tmp = leader1.Elbow
cLevelLine['Leader1Elbow'] = Point(tmp.X, tmp.Y,tmp.Z)
tmp = leader1.End
cLevelLine['Leader1End'] = Point(tmp.X, tmp.Y,tmp.Z)
tmp = leader1.Anchor
cLevelLine['Leader1Anchor'] = Point(tmp.X, tmp.Y,tmp.Z)
cLevelLine['Name'] = el.Name
tmp = cCurve.GetEndPoint(0)
cLevelLine['Start'] = Point(tmp.X, tmp.Y,tmp.Z)
tmp = cCurve.GetEndPoint(1)
cLevelLine['End'] = Point(tmp.X, tmp.Y,tmp.Z)
if el.HasBubbleInView(DB.DatumEnds.End0, cView):
cLevelLine['StartBubble']=True
if el.HasBubbleInView(DB.DatumEnds.End1, cView):
cLevelLine['EndBubble']=True
if el.IsBubbleVisibleInView(DB.DatumEnds.End0, cView):
cLevelLine['StartBubbleVisible']=True
if el.IsBubbleVisibleInView(DB.DatumEnds.End1, cView):
cLevelLine['EndBubbleVisible']=True
#if isinstance(cCurve, DB.Arc):
# tmp = cCurve.Center
# cLevelLine['Center'] = Point(tmp.X, tmp.Y,tmp.Z)
LevelLines[cLevelLine['Name']] = cLevelLine
n += 1
else:
#if isinstance(el, DB.MultiSegmentGrid):
# UI.TaskDialog.Show('pyRevitPlus', 'Skipping yet unsupported Multi-Segment grid \'{}\''.format(el.Name))
#else:
UI.TaskDialog.Show('pyRevitPlus', 'Skipping non- level element \'{}\''.format(el.Name))
if n<>1:
msg = 'Saved {} level placements to {}'.format(n,tempfile)
else:
msg = 'Saved level \'{}\' placement to {}'.format(cLevelLine['Name'],tempfile)
if n>0:
with open(tempfile, 'wb') as fp:
pickle.dump(LevelLines, fp)
# close(fp)
UI.TaskDialog.Show('pyRevitPlus', msg)
else:
UI.TaskDialog.Show('pyRevitPlus', 'Nothing to save')
| gpl-3.0 | -4,241,314,254,409,846,300 | 36.408 | 182 | 0.565013 | false | 3.529057 | false | false | false |
ec429/sexpy | sml.py | 1 | 4208 | #!/usr/bin/python
from sexpy import SExp, tok_STRING
class HtmlNodeType(object):
def __init__(self, name, allowed_children):
self.name = name
self.ac = allowed_children
def __call__(self, context, *args):
if self.ac is None: # not specified
return
for arg in args:
if isinstance(arg, SExp):
if isinstance(arg.tpl[0], HtmlNodeType):
if arg.tpl[0].name not in self.ac:
raise Exception(arg.tpl[0].name, "not allowed as child of", self.name)
class HtmlAttributeType(HtmlNodeType):
def __init__(self, name):
super(HtmlAttributeType, self).__init__(name, [])
def __call__(self, context, *args):
super(HtmlAttributeType, self).__call__(context, *args)
content = []
for arg in args:
if isinstance(arg, SExp):
content.append(arg.eval(context))
else:
content.append(arg)
return HtmlAttribute(self.name, content)
class HtmlElementType(HtmlNodeType):
def __call__(self, context, *args):
super(HtmlElementType, self).__call__(context, *args)
attrs = []
content = []
for arg in args:
if isinstance(arg, SExp):
val = arg.eval(context)
if isinstance(val, HtmlElement):
content.append(val)
elif isinstance(val, HtmlAttribute):
attrs.append(val)
else:
assert 0, val
else:
content.append(str(arg))
return HtmlElement(self.name, attrs, content)
class HtmlAttribute(object):
def __init__(self, name, content):
self.name = name
self.content = content
def __str__(self):
return '%s="%s"'%(self.name, ' '.join(map(str, self.content)))
class HtmlElement(object):
def __init__(self, name, attrs, content):
self.name = name
self.attrs = attrs
self.content = content
def __str__(self):
opentag = ' '.join([self.name,] + map(str, self.attrs))
if self.content:
return '<%s>'%opentag + ' '.join(map(str, self.content)) + '</%s>'%(self.name,)
else:
return '<%s/>'%opentag
HTML = {'html':HtmlElementType('html', ['head', 'body']),
'head':HtmlElementType('head', ['title', 'meta', 'script', 'style', 'link']),
'title':HtmlElementType('title', []),
'meta':HtmlElementType('meta', ['http-equiv', 'content', 'name', 'scheme', 'charset']),
'http-equiv':HtmlAttributeType('http-equiv'),
'content':HtmlAttributeType('content'),
'name':HtmlAttributeType('name'),
'scheme':HtmlAttributeType('scheme'),
'charset':HtmlAttributeType('charset'),
'script':HtmlElementType('script', ['src', 'type', 'defer']),
'src':HtmlAttributeType('src'),
'type':HtmlAttributeType('type'),
'defer':HtmlAttributeType('defer'),
'style':HtmlElementType('style', ['type']),
'link':HtmlElementType('link', ['rel', 'type', 'href']),
'rel':HtmlAttributeType('rel'),
# '':HtmlAttributeType(''),
'body':HtmlElementType('body', None),
'a':HtmlElementType('a', None),
'href':HtmlAttributeType('href'),
'p':HtmlElementType('p', None),
'h1':HtmlElementType('h1', None),
'h2':HtmlElementType('h2', None),
'h3':HtmlElementType('h3', None),
'h4':HtmlElementType('h4', None),
'h5':HtmlElementType('h5', None),
'h6':HtmlElementType('h6', None),
'br':HtmlElementType('br', []),
'blockquote':HtmlElementType('blockquote', None),
'img':HtmlElementType('img', ['src']),
# '':HtmlElementType('', None),
}
if __name__ == '__main__':
test = """(html (head (title SML generated page))
(body (h1 SML generated page)
(p A simple HTML page generated from SML. (br)
(a (href /index.htm) Index)
)
)
)"""
s = SExp.parse(test)
print s
print s.eval(HTML)
| mit | 5,023,051,822,639,744,000 | 38.327103 | 95 | 0.533745 | false | 3.903525 | false | false | false |
thepizzaking/whaawmp | src/common/lists.py | 1 | 5662 | # -*- coding: utf-8 -*-
# A few useful lists.
# Copyright © 2007-2011, Jeff Bailes <[email protected]>.
# This file is part of Whaaw! Media Player (whaawmp)
#
# whaawmp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the Licence, or
# (at your option) any later version.
#
# whaawmp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# The Whaaw! Media Player project hereby grants permission for non-GPL
# compatible GStreamer plugins to be used and distributed together with
# GStreamer and Whaaw! Media Player. This permission is above and beyond
# the permissions granted by the GPL licence by which Whaaw! Media Player
# is covered. (See COPYING file for more details)
## The mime type list of compatable files, for open dialogue.
compatFiles = ['application/ogg', 'application/ram', 'application/smil',
'application/vnd.rn-realmedia', 'application/x-extension-m4a',
'application/x-extension-mp4', 'application/x-flac',
'application/x-flash-video', 'application/x-matroska',
'application/x-ogg', 'application/x-quicktime-media-link',
'application/x-quicktimeplayer', 'application/x-shockwave-flash',
'application/x-shorten', 'application/x-smil', 'application/xspf+xml',
'audio/3gpp', 'audio/ac3', 'audio/AMR', 'audio/AMR-WB', 'audio/basic',
'audio/mp4', 'audio/mpeg', 'audio/mpegurl', 'audio/vnd.rn-realaudio',
'audio/x-ape', 'audio/x-flac', 'audio/x-it', 'audio/x-m4a',
'audio/x-matroska', 'audio/x-mod', 'audio/x-mp3', 'audio/x-mpeg',
'audio/x-mpegurl', 'audio/x-ms-asf', 'audio/x-ms-asx', 'audio/x-ms-wax',
'audio/x-ms-wma', 'audio/x-musepack', 'audio/x-pn-aiff', 'audio/x-pn-au',
'audio/x-pn-realaudio', 'audio/x-pn-realaudio-plugin', 'audio/x-pn-wav',
'audio/x-pn-windows-acm', 'audio/x-realaudio', 'audio/x-real-audio',
'audio/x-scpls', 'audio/x-tta', 'audio/x-wav', 'audio/x-wav',
'audio/x-wavpack', 'image/vnd.rn-realpix', 'image/x-pict', 'misc/ultravox',
'text/google-video-pointer', 'text/x-google-video-pointer', 'video/3gpp',
'video/dv', 'video/fli', 'video/flv', 'video/mp4', 'video/mp4v-es',
'video/mpeg', 'video/msvideo', 'video/quicktime', 'video/vivo',
'video/vnd.divx', 'video/vnd.rn-realvideo', 'video/vnd.vivo', 'video/webm', 'video/x-anim',
'video/x-avi', 'video/x-flc', 'video/x-fli', 'video/x-flic', 'video/x-m4v',
'video/x-matroska', 'video/x-mpeg', 'video/x-ms-asf', 'video/x-msvideo',
'video/x-ms-wm', 'video/x-ms-wmv', 'video/x-ms-wmx', 'video/x-ms-wvx',
'video/x-nsv', 'video/x-ogm+ogg', 'video/x-theora+ogg', 'text/uri-list']
## The widgets that are normally hidden.
hiddenNormalWidgets = ['btnLeaveFullscreen']
## A list of widgets to hide on fullscreen.
hiddenFSWidgets = ['menubar', 'hboxTop', 'hboxControl', 'hboxBottom', 'btnLeaveFullscreen']
## The list of widgets to reshow when the mouse is moved (fullscreen).
fsShowWMouse = ['hboxControl', 'hboxBottom', 'btnLeaveFullscreen']
## A dictionary with all the default options.
defaultOptions = { 'video/brightness' : 0,
'video/contrast' : 1,
'video/hue' : 0,
'video/saturation' : 1,
'video/force-aspect-ratio' : True,
'video/videosink' : 'default',
'video/autosub' : False,
'video/autosubexts' : 'srt,idx,sub,ssa,ass',
'video/subfont' : 'Sans 20', # TODO: maybe tweak this.
'video/subenc' : '', # Empty means use default encoding
'gui/mousehidetimeout' : 2000,
'gui/instantseek' : False,
'gui/showtimeremaining' : False,
'gui/enablevisualisation' : False,
'gui/iconsize' : 1,
'gui/fileastitle' : True,
'gui/shownextbutton' : True,
'gui/showrestartbutton' : False,
'gui/tagsyntax' : '{artist} - {title}',
'audio/volume' : 0.75,
'audio/audiosink' : 'default',
'audio/audiodevice' : '',
'misc/onextnewfile' : 1,
'misc/disablescreensaver' : True }
## Some gstreamer lists.
## A list of gstreamer stream types (in order too!).
gstStreamType = [ 'unknown', 'audio', 'video', 'text', 'element' ]
## Available colour settings (Remember to use .lower() if lowercase required).
colourSettings = [ 'Brightness', 'Contrast', 'Hue', 'Saturation' ]
## A dictionary for keystrokes and the signals each should emit.
keypressDict = { 'space' : ['toggle-play-pause'],
'f' : ['toggle-fullscreen'],
'F11' : ['toggle-fullscreen'],
'n' : ['play-next'],
'p' : ['restart-track'],
'r' : ['restart-track'],
'q' : ['toggle-queue'],
'a' : ['toggle-advanced-controls']}
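## Illustrative sketch (an editor's addition, not part of the original module):
## how callers might consume these tables. 'userConfig' is a hypothetical dict
## of the user's saved options.
def get_option(userConfig, key):
	""" Return the user's value for an option, falling back to the default. """
	return userConfig.get(key, defaultOptions[key])
def signals_for_key(keyname):
	""" Return the list of signals to emit for a pressed key (may be empty). """
	return keypressDict.get(keyname, [])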
| gpl-3.0 | 9,084,138,836,555,284,000 | 51.906542 | 106 | 0.583819 | false | 3.328042 | false | false | false |
babbageclunk/gatesym | gatesym/core_ffi.py | 1 | 1467 | from cffi import FFI
import os
SO_PATH = os.path.expanduser('~/Dropbox/code/rust/gatesymcore/target/release/libgatesymcore.so')
ffi = FFI()
ffi.cdef("""
void *network_new();
void network_free(void *ptr);
size_t network_add_gate(void *ptr, uint8_t kind, uint32_t cookie);
void network_add_link(void *ptr, size_t source_index, size_t dest_index, uint8_t negate);
uint8_t network_read(void *ptr, size_t gate_index);
void network_write(void *ptr, size_t gate_index, uint8_t value);
size_t network_drain(void *ptr);
""")
lib = ffi.dlopen(SO_PATH)
TIE, SWITCH, AND, OR = range(4)
class Network(object):
def __init__(self):
self._ptr = lib.network_new()
self._cookies = []
def __del__(self):
lib.network_free(self._ptr)
def add_gate(self, type_, cookie):
self._cookies.append(cookie)
return lib.network_add_gate(self._ptr, type_, len(self._cookies))
def add_link(self, source_index, dest_index, negate=False):
assert dest_index >= 0
assert source_index >= 0
lib.network_add_link(self._ptr, source_index, dest_index, negate)
def read(self, gate_index):
assert gate_index >= 0
return bool(lib.network_read(self._ptr, gate_index))
def write(self, gate_index, value):
assert gate_index >= 0
lib.network_write(self._ptr, gate_index, value)
def drain(self):
return lib.network_drain(self._ptr)
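# Illustrative usage sketch (an editor's addition, not part of the original
# module). It assumes the semantics suggested by the constants above: SWITCH
# gates are writable inputs, an AND gate combines its incoming links, and
# drain() propagates pending changes through the network.
if __name__ == '__main__':
    net = Network()
    a = net.add_gate(SWITCH, 'input a')
    b = net.add_gate(SWITCH, 'input b')
    out = net.add_gate(AND, 'a and b')
    net.add_link(a, out)
    net.add_link(b, out)
    net.write(a, True)
    net.write(b, True)
    net.drain()
    print(net.read(out))  # expected: True once both inputs are on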
| mit | 7,358,668,042,376,080,000 | 28.938776 | 96 | 0.629857 | false | 3.161638 | false | false | false |
blckshrk/Weboob | weboob/applications/traveloob/traveloob.py | 1 | 4156 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon, Julien Hébert
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import sys
from datetime import datetime
import logging
from weboob.capabilities.travel import ICapTravel, RoadmapFilters
from weboob.tools.application.repl import ReplApplication, defaultcount
__all__ = ['Traveloob']
class Traveloob(ReplApplication):
APPNAME = 'traveloob'
VERSION = '0.h'
COPYRIGHT = 'Copyright(C) 2010-2011 Romain Bignon'
DESCRIPTION = "Console application allowing to search for train stations and get departure times."
SHORT_DESCRIPTION = "search for train stations and departures"
CAPS = ICapTravel
DEFAULT_FORMATTER = 'table'
def add_application_options(self, group):
group.add_option('--departure-time')
group.add_option('--arrival-time')
@defaultcount(10)
def do_stations(self, pattern):
"""
stations PATTERN
Search stations.
"""
for backend, station in self.do('iter_station_search', pattern):
self.format(station)
@defaultcount(10)
def do_departures(self, line):
"""
departures STATION [ARRIVAL]
List all departures for a given station.
"""
station, arrival = self.parse_command_args(line, 2, 1)
station_id, backend_name = self.parse_id(station)
if arrival:
arrival_id, backend_name2 = self.parse_id(arrival)
if backend_name and backend_name2 and backend_name != backend_name2:
logging.error('Departure and arrival aren\'t on the same backend')
return 1
else:
arrival_id = backend_name2 = None
if backend_name:
backends = [backend_name]
elif backend_name2:
backends = [backend_name2]
else:
backends = None
for backend, departure in self.do('iter_station_departures', station_id, arrival_id, backends=backends):
self.format(departure)
def do_roadmap(self, line):
"""
roadmap DEPARTURE ARRIVAL
Display the roadmap to travel from DEPARTURE to ARRIVAL.
Command-line parameters:
--departure-time TIME requested departure time
--arrival-time TIME requested arrival time
TIME might be in form "yyyy-mm-dd HH:MM" or "HH:MM".
Example:
> roadmap Puteaux Aulnay-sous-Bois --arrival-time 22:00
"""
departure, arrival = self.parse_command_args(line, 2, 2)
filters = RoadmapFilters()
try:
filters.departure_time = self.parse_datetime(self.options.departure_time)
filters.arrival_time = self.parse_datetime(self.options.arrival_time)
except ValueError as e:
print >>sys.stderr, 'Invalid datetime value: %s' % e
print >>sys.stderr, 'Please enter a datetime in form "yyyy-mm-dd HH:MM" or "HH:MM".'
return 1
for backend, route in self.do('iter_roadmap', departure, arrival, filters):
self.format(route)
def parse_datetime(self, text):
if text is None:
return None
try:
date = datetime.strptime(text, '%Y-%m-%d %H:%M')
except ValueError:
try:
date = datetime.strptime(text, '%H:%M')
except ValueError:
raise ValueError(text)
date = datetime.now().replace(hour=date.hour, minute=date.minute)
return date
| agpl-3.0 | -5,384,248,984,045,821,000 | 32.24 | 112 | 0.633694 | false | 3.916117 | false | false | false |
clones/django-evolution | django_evolution/tests/multi_db.py | 1 | 29302 | from django_evolution.tests.utils import test_sql_mapping
tests = r"""
>>> from django.db import models
>>> from django_evolution.mutations import ChangeField
>>> from django_evolution.tests.utils import test_proj_sig_multi, execute_test_sql, register_models_multi, deregister_models
>>> from django_evolution.diff import Diff
>>> import copy
# Use Cases:
# Setting a null constraint
# -- without an initial value
# -- with a null initial value
# -- with a good initial value (constant)
# -- with a good initial value (callable)
# Removing a null constraint
# Invoking a no-op change field
# Changing the max_length of a character field
# -- increasing the max_length
# -- decreasing the max_length
# Renaming a column
# Changing the db_table of a many to many relationship
# Adding an index
# Removing an index
# Adding a unique constraint
# Removing a unique constraint
# Redundant attributes. (Some attributes have changed, while others haven't but are specified anyway.)
# Changing more than one attribute at a time (on different fields)
# Changing more than one attribute at a time (on one field)
### This one is a bit dubious because changing the primary key of a model will mean
### that all referenced foreign keys and M2M relationships need to be updated
# Adding a primary key constraint
# Removing a Primary Key (Changing the primary key column)
# Options that apply to all fields:
# DB related options
# null
# db_column
# db_index
# db_tablespace (Ignored)
# primary_key
# unique
# db_table (only for many to many relationships)
# -- CharField
# max_length
# Non-DB options
# blank
# core
# default
# editable
# help_text
# radio_admin
# unique_for_date
# unique_for_month
# unique_for_year
# validator_list
# I don't know yet
# choices
>>> class ChangeSequenceFieldInitial(object):
... def __init__(self, suffix):
... self.suffix = suffix
...
... def __call__(self):
... from django.db import connections
... qn = connections['db_multi'].ops.quote_name
... return qn('char_field')
# Now, a useful test model we can use for evaluating diffs
>>> class ChangeAnchor1(models.Model):
... value = models.IntegerField()
>>> class ChangeBaseModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
# Store the base signatures
>>> anchors = [('ChangeAnchor1', ChangeAnchor1)]
>>> test_model = ('TestModel', ChangeBaseModel)
>>> start = register_models_multi('tests', 'db_multi', *anchors)
>>> start.update(register_models_multi('tests', 'db_multi', test_model))
>>> start_sig = test_proj_sig_multi('tests', test_model, *anchors)
# Setting a null constraint without an initial value
>>> class SetNotNullChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=False)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', SetNotNullChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', SetNotNullChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'char_field1':
Property 'null' has changed
>>> print [str(e) for e in d.evolution()['tests']] # SetNotNullChangeModel
["ChangeField('TestModel', 'char_field1', initial=<<USER VALUE REQUIRED>>, null=False)"]
# Without an initial value
>>> evolution = [ChangeField('TestModel', 'char_field1', null=False)]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in evolution:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
Traceback (most recent call last):
...
SimulationFailure: Cannot change column 'char_field1' on 'tests.TestModel' without a non-null initial value.
# With a null initial value
>>> evolution = [ChangeField('TestModel', 'char_field1', null=False, initial=None)]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in evolution:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
Traceback (most recent call last):
...
SimulationFailure: Cannot change column 'char_field1' on 'tests.TestModel' without a non-null initial value.
# With a good initial value (constant)
>>> evolution = [ChangeField('TestModel', 'char_field1', null=False, initial="abc's xyz")]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in evolution:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # SetNotNullChangedModelWithConstant
%(SetNotNullChangeModelWithConstant)s
# With a good initial value (callable)
>>> evolution = [ChangeField('TestModel', 'char_field1', null=False, initial=ChangeSequenceFieldInitial('SetNotNullChangeModel'))]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in evolution:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # SetNotNullChangeModelWithCallable
%(SetNotNullChangeModelWithCallable)s
# Removing a null constraint
>>> class SetNullChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=True)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', SetNullChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', SetNullChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'char_field2':
Property 'null' has changed
>>> print [str(e) for e in d.evolution()['tests']] # SetNullChangeModel
["ChangeField('TestModel', 'char_field2', initial=None, null=True)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # SetNullChangeModel
%(SetNullChangeModel)s
# Removing a null constraint
>>> class NoOpChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', NoOpChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', NoOpChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
<BLANKLINE>
>>> evolution = [ChangeField('TestModel', 'char_field1', null=True)]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in evolution:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # NoOpChangeModel
%(NoOpChangeModel)s
# Increasing the max_length of a character field
>>> class IncreasingMaxLengthChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=45)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', IncreasingMaxLengthChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', IncreasingMaxLengthChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'char_field':
Property 'max_length' has changed
>>> print [str(e) for e in d.evolution()['tests']] # IncreasingMaxLengthChangeModel
["ChangeField('TestModel', 'char_field', initial=None, max_length=45)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # IncreasingMaxLengthChangeModel
%(IncreasingMaxLengthChangeModel)s
# Decreasing the max_length of a character field
>>> class DecreasingMaxLengthChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=1)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', DecreasingMaxLengthChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', DecreasingMaxLengthChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'char_field':
Property 'max_length' has changed
>>> print [str(e) for e in d.evolution()['tests']] # DecreasingMaxLengthChangeModel
["ChangeField('TestModel', 'char_field', initial=None, max_length=1)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # DecreasingMaxLengthChangeModel
%(DecreasingMaxLengthChangeModel)s
# Renaming a column
>>> class DBColumnChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='customised_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', DBColumnChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', DBColumnChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'int_field':
Property 'db_column' has changed
>>> print [str(e) for e in d.evolution()['tests']] # DBColumnChangeModel
["ChangeField('TestModel', 'int_field', initial=None, db_column='customised_db_column')"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # DBColumnChangeModel
%(DBColumnChangeModel)s
# Changing the db_table of a many to many relationship
>>> class M2MDBTableChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='custom_m2m_db_table_name')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', M2MDBTableChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', M2MDBTableChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'm2m_field1':
Property 'db_table' has changed
>>> print [str(e) for e in d.evolution()['tests']] # M2MDBTableChangeModel
["ChangeField('TestModel', 'm2m_field1', initial=None, db_table='custom_m2m_db_table_name')"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # M2MDBTableChangeModel
%(M2MDBTableChangeModel)s
# Adding an index
>>> class AddDBIndexChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=True)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', AddDBIndexChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', AddDBIndexChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'int_field2':
Property 'db_index' has changed
>>> print [str(e) for e in d.evolution()['tests']] # AddDBIndexChangeModel
["ChangeField('TestModel', 'int_field2', initial=None, db_index=True)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # AddDBIndexChangeModel
%(AddDBIndexChangeModel)s
# Removing an index
>>> class RemoveDBIndexChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=False)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', RemoveDBIndexChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', RemoveDBIndexChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'int_field1':
Property 'db_index' has changed
>>> print [str(e) for e in d.evolution()['tests']] # RemoveDBIndexChangeModel
["ChangeField('TestModel', 'int_field1', initial=None, db_index=False)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # RemoveDBIndexChangeModel
%(RemoveDBIndexChangeModel)s
# Adding a unique constraint
>>> class AddUniqueChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=True)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', AddUniqueChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', AddUniqueChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'int_field4':
Property 'unique' has changed
>>> print [str(e) for e in d.evolution()['tests']] # AddUniqueChangeModel
["ChangeField('TestModel', 'int_field4', initial=None, unique=True)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # AddUniqueChangeModel
%(AddUniqueChangeModel)s
# Removing a unique constraint
>>> class RemoveUniqueChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=False)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', RemoveUniqueChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', RemoveUniqueChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'int_field3':
Property 'unique' has changed
>>> print [str(e) for e in d.evolution()['tests']] # RemoveUniqueChangeModel
["ChangeField('TestModel', 'int_field3', initial=None, unique=False)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # RemoveUniqueChangeModel
%(RemoveUniqueChangeModel)s
# Changing more than one attribute at a time (on different fields)
>>> class MultiAttrChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column2')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=35)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=True)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', MultiAttrChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', MultiAttrChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'char_field2':
Property 'null' has changed
In field 'int_field':
Property 'db_column' has changed
In field 'char_field':
Property 'max_length' has changed
>>> print [str(e) for e in d.evolution()['tests']] # MultiAttrChangeModel
["ChangeField('TestModel', 'char_field2', initial=None, null=True)", "ChangeField('TestModel', 'int_field', initial=None, db_column='custom_db_column2')", "ChangeField('TestModel', 'char_field', initial=None, max_length=35)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # MultiAttrChangeModel
%(MultiAttrChangeModel)s
# Changing more than one attribute at a time (on a single field)
>>> class MultiAttrSingleFieldChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=35, null=True)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', MultiAttrSingleFieldChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', MultiAttrSingleFieldChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> print d
In model tests.TestModel:
In field 'char_field2':
Property 'max_length' has changed
Property 'null' has changed
>>> print [str(e) for e in d.evolution()['tests']] # MultiAttrSingleFieldChangeModel
["ChangeField('TestModel', 'char_field2', initial=None, max_length=35, null=True)"]
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> for mutation in d.evolution()['tests']:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # MultiAttrSingleFieldChangeModel
%(MultiAttrSingleFieldChangeModel)s
# Redundant attributes. (Some attributes have changed, while others haven't but are specified anyway.)
>>> class RedundantAttrsChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column3')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = models.IntegerField(unique=False)
... char_field = models.CharField(max_length=35)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=True)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', RedundantAttrsChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', RedundantAttrsChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> test_sig = copy.deepcopy(start_sig)
>>> test_sql = []
>>> evolutions = [
... ChangeField("TestModel", "char_field2", initial=None, null=True, max_length=30),
... ChangeField("TestModel", "int_field", initial=None, db_column="custom_db_column3", primary_key=False, unique=False, db_index=False),
... ChangeField("TestModel", "char_field", initial=None, max_length=35),
... ]
>>> for mutation in evolutions:
... test_sql.extend(mutation.mutate('tests', test_sig))
... mutation.simulate('tests', test_sig)
>>> Diff(test_sig, end_sig).is_empty()
True
>>> execute_test_sql(start, end, test_sql, database='db_multi', app_label='tests') # RedundantAttrsChangeModel
%(RedundantAttrsChangeModel)s
# Changing the field type to another type with the same internal_type
>>> class MyIntegerField(models.IntegerField):
... def get_internal_type(self):
... return 'IntegerField'
>>> class MinorFieldTypeChangeModel(models.Model):
... my_id = models.AutoField(primary_key=True)
... alt_pk = models.IntegerField()
... int_field = models.IntegerField(db_column='custom_db_column')
... int_field1 = models.IntegerField(db_index=True)
... int_field2 = models.IntegerField(db_index=False)
... int_field3 = models.IntegerField(unique=True)
... int_field4 = MyIntegerField(unique=False)
... char_field = models.CharField(max_length=20)
... char_field1 = models.CharField(max_length=25, null=True)
... char_field2 = models.CharField(max_length=30, null=False)
... m2m_field1 = models.ManyToManyField(ChangeAnchor1, db_table='multi_db_non-default_m2m_table')
>>> end = register_models_multi('tests', 'db_multi', ('TestModel', MinorFieldTypeChangeModel), *anchors)
>>> end_sig = test_proj_sig_multi('tests', ('TestModel', MinorFieldTypeChangeModel), *anchors)
>>> d = Diff(start_sig, end_sig)
>>> d.is_empty()
True
# Clean up after the applications that were installed
>>> deregister_models('tests')
""" % test_sql_mapping('multi_db', db_name='db_multi')
| bsd-3-clause | 8,471,837,691,026,984,000 | 41.652111 | 225 | 0.687769 | false | 3.223542 | true | false | false |
StephenKinger/privaan | setup.py | 1 | 5110 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from setuptools.command.develop import develop
from setuptools.command.install import install
import os
# note that we import the library here,
# so make sure that importing it has no side effects
import privaan
class PostDevelopCommand(develop):
"""Post-installation for development mode."""
def run(self):
# PUT YOUR POST-INSTALL SCRIPT HERE or CALL A FUNCTION
os.system("chmod +x /etc/init.d/privaanservice")
develop.run(self)
class PostInstallCommand(install):
"""Post-installation for installation mode."""
def run(self):
# PUT YOUR POST-INSTALL SCRIPT HERE or CALL A FUNCTION
username = raw_input("Enter the username for sender: ")
password = raw_input("Enter the password for sender: ")
sender_email = raw_input("Enter the email for the sender: ")
sender_receivers = raw_input("Enter the emails for receivers (coma separated): ")
user_max_box_api_key = raw_input("Enter the map box api key (https://www.mapbox.com): ")
os.system("rm privaan/config.py && touch privaan/config.py")
f = open('privaan/config.py', 'w')
f.write('username = \''+username+'\'\n')
f.write('password = \''+password+'\'\n')
f.write('fromaddr = \''+sender_email+'\'\n')
f.write('toaddrs = \''+sender_receivers+'\'\n')
f.write('map_box_api_key = \''+user_max_box_api_key+'\'\n')
f.close()
install.run(self)
os.system("chmod +x /etc/init.d/privaanservice")
os.system("update-rc.d privaanservice defaults")
os.system("/etc/init.d/privaanservice start")
# This is just a function call, but it is a very, very long one
# and it takes a lot of parameters
setup(
    # the name of your library, as it will appear on PyPI
name='privaan',
    # the version of the code
version=privaan.__version__,
    # Lists the packages to include in the distribution.
    # Rather than doing it by hand, we use setuptools' find_packages()
    # function, which recursively searches for all Python packages in
    # the current directory.
    # This is why everything was put in a single folder:
    # it lets us use this function easily.
packages=find_packages(),
    # your name
author="Stephen KINGER",
    # Your email address, bearing in mind that it will be publicly visible,
    # with all the risks that implies.
author_email="",
    # A short description
description="Tool to monitor apache logs and notify on connexions",
    # A long description, which will be displayed to present the library.
    # Usually the README is dumped here.
long_description=open('README.md').read(),
    # You can add a list of dependencies for your library,
    # and even pin a version. At install time, Python will try to
    # download and install them.
    #
    # Ex: ["gunicorn", "docutils >= 0.3", "lxml==0.5a7"]
    #
    # In our case we don't need any here, so it is commented out, but it is
    # left in so you know it exists, because it is very useful.
# install_requires= ,
    # Enables taking the MANIFEST.in file into account
include_package_data=True,
    # A URL that points to the official page of your library
url='http://github.com/StephenKinger/privaan',
    # It is customary to include some metadata about your library
    # so that robots can easily classify it.
    # The list of allowed classifiers is long:
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers.
    #
    # There is no real rule for the content. Everyone does it more or
    # less as they see fit. Some people put nothing at all.
classifiers=[
"Programming Language :: Python",
"Development Status :: WIP",
"License :: OSI Approved",
"Natural Language :: English",
"Operating System :: Linux",
"Programming Language :: Python :: 2.7",
"Topic :: Security",
],
install_requires=['mock>=2.0.0','pygtail>=0.7.0','docopt>=0.6.2','requests>=2.12.4'],
data_files=[('/etc/init.d', ['daemon/privaanservice'])],
    # This is a plugin system, but it is used almost exclusively
    # to create commands, like "django-admin".
    # For example, if we want to create the fabulous "proclame-sm" command,
    # we point that name at the proclamer() function. The command will be
    # created automatically.
    # The syntax is "command-name-to-create = package.module:function".
entry_points = {
'console_scripts': [
'privaan = privaan:privaan_run',
],
},
    # Only needed if your license is not listed in "classifiers",
    # which is our case here
license="MIT",
    # There are plenty of other possible parameters, but with these
    # you cover 90% of the needs
cmdclass={
'develop': PostDevelopCommand,
'install': PostInstallCommand,
},
)
| mit | -9,133,602,008,088,023,000 | 35.731884 | 96 | 0.65654 | false | 3.148447 | false | false | false |
plinecom/JobManager | gui/submit/fileinfo/fileinfo.py | 1 | 1576 | from PyQt4 import QtGui, QtCore
import gui.submit.fileinfo.common
import gui.submit.fileinfo.maya1
import gui.submit.fileinfo.maya2
import gui.submit.fileinfo.maya_mentalray
import gui.submit.fileinfo.nuke
class FileInfoPanel(QtGui.QTabWidget):
def __init__(self, job_list, dispatcher_list, config_info, parent=None):
QtGui.QTabWidget.__init__(self)
self._parent = parent
self._joblist = job_list
        self._dispatcherList = dispatcher_list
self._configInfo = config_info
self.update_ui()
def update_ui(self):
self.clear()
job_common_panel = gui.submit.fileinfo.common.CommonPanel(self._joblist, self._parent)
self.addTab(job_common_panel, "fileinfo")
if "Maya" in self._joblist.get_current_job().getvalue("[*].*.software")[0]:
maya_panel1 = gui.submit.fileinfo.maya1.MayaPanel(self._joblist, self._parent)
self.addTab(maya_panel1, "Maya1")
maya_panel2 = gui.submit.fileinfo.maya2.MayaPanel(self._joblist, self._parent)
self.addTab(maya_panel2, "Maya2")
if "mentalray" in self._joblist.get_current_job().getvalue("[*].*.renderer")[0]:
mentalray_panel = gui.submit.fileinfo.maya_mentalray.MentalrayPanel(self._joblist, self._parent)
self.addTab(mentalray_panel, "Mentalray")
elif "Nuke" in self._joblist.get_current_job().getvalue("[*].*.software")[0]:
nuke_panel = gui.submit.fileinfo.nuke.NukePanel(self._joblist, self._parent)
self.addTab(nuke_panel, "Nuke")
| mit | 884,643,911,518,618,900 | 40.473684 | 112 | 0.654188 | false | 3.216327 | false | false | false |
aziele/alfpy | tests/test_word_distance.py | 1 | 10504 | import unittest
from alfpy import word_pattern
from alfpy import word_vector
from alfpy import word_distance
from alfpy.utils import distmatrix
from . import utils
class DistanceTest(unittest.TestCase, utils.ModulesCommonTest):
def __init__(self, *args, **kwargs):
super(DistanceTest, self).__init__(*args, **kwargs)
utils.ModulesCommonTest.set_test_data()
self.pattern = word_pattern.create(self.dna_records.seq_list, 2)
self.counts = word_vector.Counts(self.dna_records.length_list,
self.pattern)
self.freqs = word_vector.Freqs(self.dna_records.length_list,
self.pattern)
def test_angle_cos_diss_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'angle_cos_diss')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.2797355 0.1500672',
'seq2 0.2797355 0.0000000 0.1261027',
'seq3 0.1500672 0.1261027 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_angle_cos_evol_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'angle_cos_evol')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.3281368 0.1625980',
'seq2 0.3281368 0.0000000 0.1347925',
'seq3 0.1625980 0.1347925 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_diff_abs_add_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'diff_abs_add')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.0810458 0.0507937',
'seq2 0.0810458 0.0000000 0.0526611',
'seq3 0.0507937 0.0526611 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_diff_abs_mult1_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'diff_abs_mult1')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.0621975 0.0501075',
'seq2 0.0621975 0.0000000 0.0955847',
'seq3 0.0501075 0.0955847 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_diff_abs_mult2_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'diff_abs_mult2')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.0621975 0.0404611',
'seq2 0.0621975 0.0000000 0.0531478',
'seq3 0.0404611 0.0531478 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_euclid_seqlen1_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'euclid_seqlen1')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.0065879 0.0032065',
'seq2 0.0065879 0.0000000 0.0041065',
'seq3 0.0032065 0.0041065 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_euclid_seqlen2_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'euclid_seqlen2')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.0072101 0.0038263',
'seq2 0.0072101 0.0000000 0.0039866',
'seq3 0.0038263 0.0039866 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_manhattan_freqs(self):
dist = word_distance.Distance(self.freqs, 'manhattan')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 1.2156863 0.7619048",
"seq2 1.2156863 0.0000000 0.7899160",
"seq3 0.7619048 0.7899160 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_chebyshev_freqs(self):
dist = word_distance.Distance(self.freqs, 'chebyshev')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.1936275 0.1250000",
"seq2 0.1936275 0.0000000 0.1428571",
"seq3 0.1250000 0.1428571 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_braycurtis_freqs(self):
dist = word_distance.Distance(self.freqs, 'braycurtis')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.6078431 0.3809524",
"seq2 0.6078431 0.0000000 0.3949580",
"seq3 0.3809524 0.3949580 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_diff_abs_mult_freqs(self):
dist = word_distance.Distance(self.freqs, 'diff_abs_mult')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.0621975 0.0404611",
"seq2 0.0621975 0.0000000 0.0531478",
"seq3 0.0404611 0.0531478 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_kld_freqs(self):
dist = word_distance.Distance(self.freqs, 'kld')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.0932800 0.0435210",
"seq2 0.0932800 0.0000000 0.0447391",
"seq3 0.0435210 0.0447391 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_lcc_freqs(self):
dist = word_distance.Distance(self.freqs, 'lcc')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.6205496 0.4017554",
"seq2 0.6205496 0.0000000 0.2550506",
"seq3 0.4017554 0.2550506 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_canberra_freqs(self):
dist = word_distance.Distance(self.freqs, 'canberra')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 10.3372258 7.1836838",
"seq2 10.3372258 0.0000000 6.6280959",
"seq3 7.1836838 6.6280959 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_minkowski_freqs(self):
dist = word_distance.Distance(self.freqs, 'minkowski')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.3763512 0.2532387",
"seq2 0.3763512 0.0000000 0.2603008",
"seq3 0.2532387 0.2603008 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_minkowski_throws_exception(self):
dist = word_distance.Distance(self.freqs, 'minkowski')
with self.assertRaises(Exception) as context:
dist.pwdist_minkowski(0, 1, 0.2)
self.assertIn('p must be at least 1', str(context.exception))
def test_jsd_freqs(self):
dist = word_distance.Distance(self.freqs, 'jsd')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.4608882 0.2550278",
"seq2 0.4608882 0.0000000 0.2457790",
"seq3 0.2550278 0.2457790 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_euclid_squared_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'euclid_squared')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.1416402 0.0641298',
'seq2 0.1416402 0.0000000 0.0677565',
'seq3 0.0641298 0.0677565 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_euclid_norm_counts(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.counts, 'euclid_norm')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 7.5498344 5.4772256',
'seq2 7.5498344 0.0000000 4.3588989',
'seq3 5.4772256 4.3588989 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_euclid_norm_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'euclid_norm')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.3763512 0.2532387',
'seq2 0.3763512 0.0000000 0.2603008',
'seq3 0.2532387 0.2603008 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_google_freqs(self):
dist = word_distance.Distance(self.freqs, 'google')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.6078431 0.3809524',
'seq2 0.6078431 0.0000000 0.3949580',
'seq3 0.3809524 0.3949580 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
if __name__ == '__main__':
unittest.main()
| mit | 4,758,581,940,622,395,000 | 43.134454 | 77 | 0.564261 | false | 3.378578 | true | false | false |
mitsuhiko/celery | celery/contrib/batches.py | 1 | 1489 | from itertools import count
from collections import deque, defaultdict
from celery.task.base import Task
class Batches(Task):
abstract = True
flush_every = 10
def __init__(self):
self._buffer = deque()
self._count = count().next
def execute(self, wrapper, pool, loglevel, logfile):
self._buffer.append((wrapper, pool, loglevel, logfile))
if not self._count() % self.flush_every:
self.flush(self._buffer)
self._buffer.clear()
def flush(self, tasks):
for wrapper, pool, loglevel, logfile in tasks:
wrapper.execute_using_pool(pool, loglevel, logfile)
class Counter(Task):
abstract = True
flush_every = 10
def __init__(self):
self._buffer = deque()
self._count = count().next
def execute(self, wrapper, pool, loglevel, logfile):
self._buffer.append((wrapper.args, wrapper.kwargs))
if not self._count() % self.flush_every:
self.flush(self._buffer)
self._buffer.clear()
def flush(self, buffer):
raise NotImplementedError("Counters must implement 'flush'")
class ClickCounter(Task):
flush_every = 1000
def flush(self, buffer):
urlcount = defaultdict(lambda: 0)
for args, kwargs in buffer:
urlcount[kwargs["url"]] += 1
for url, count in urlcount.items():
print(">>> Clicks: %s -> %s" % (url, count))
# increment_in_db(url, n=count)
| bsd-3-clause | -4,240,570,643,636,673,000 | 25.589286 | 68 | 0.599731 | false | 3.991957 | false | false | false |
dgfree/Minesweeper-Clone | screen.py | 1 | 6335 | # -*- coding: utf-8 -*-
"""
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
date: Mon Jan 19 14:52:04 2015
@author: daniel
"""
import pygame
from block import Block
from constants import *
class Screen:
FLAG_LOCATION = "img/flag.png"
MINE_LOCATION = "img/mine.png"
RED_X_LOCATION = "img/red_x.png"
BLOCK_COLOR = GRAY
CLICKED_BLOCK_COLOR = DARKGRAY
BACKGROUND_COLOR = BLACK
MINE_COLOR = RED
# Bottom portion of screen to display counters, time, etc.
INFO_HEIGHT = 75
def __init__(self, grid):
# Screen elements
self.screen_size = self._get_screen_size(grid)
self.screen = pygame.display.set_mode(self.screen_size)
        # TODO: change how the Grid and Block classes are initialized
self.block_font_size = int(0.7 * Block.WIDTH)
self.display_font_size = int(grid.row_count * 1.2 - grid.col_count*.2)
pygame.display.set_caption("Minesweeper Alpha")
self.word_font = pygame.font.SysFont('Arial',
self.display_font_size,
True, False)
self.block_font = pygame.font.SysFont('Courier',
self.block_font_size,
True, False)
self.flag_image = pygame.image.load(self.FLAG_LOCATION)
self.mine_image = pygame.image.load(self.MINE_LOCATION)
self.red_x_image = pygame.image.load(self.RED_X_LOCATION)
self.initial_draw(grid)
def initial_draw(self, grid):
self.screen.fill(self.BACKGROUND_COLOR)
for row in range(grid.row_count):
for col in range(grid.col_count):
self._draw_empty_block(row, col, self.BLOCK_COLOR)
self._display_flag_counter(0)
self._display_mine_counter(grid.mine_count)
self._display_time_counter(0)
def draw_grid(self, grid):
for row in grid.blocks:
for block in row:
self._draw_block(block)
def game_over(self, grid):
self._draw_empty_block(grid.last_clicked_block.row,
grid.last_clicked_block.col, self.MINE_COLOR)
grid.reveal_mines_and_flags()
self.draw_grid(grid)
self._display_text("You lose!", 10)
self._display_text("Left click to restart.", 30)
self._display_text("Right click to quit.", 50)
def victory_screen(self, grid):
grid.reveal_mines_and_flags()
self.draw_grid(grid)
self._display_text("You win!", 10)
self._display_text("Left click to restart.", 30)
self._display_text("Right click to quit.", 50)
def _get_screen_size(self, grid):
screen_height = grid.row_count * (Block.HEIGHT + Block.MARGIN) + \
Block.MARGIN + self.INFO_HEIGHT
screen_width = grid.col_count * (Block.WIDTH + Block.MARGIN) + \
Block.MARGIN
return (screen_width, screen_height)
def _draw_empty_block(self, row, col, color):
# TODO: Fix this. Since the blocks aren't generated until after
# the user clicks, we have to do it like this for now. Perhaps
# we can find a different way to initialize blocks.
pygame.draw.rect(self.screen, color,
(col * Block.WIDTH + (col + 1) *
Block.MARGIN,
row * Block.HEIGHT + (row + 1) *
Block.MARGIN, Block.WIDTH, Block.HEIGHT))
def _draw_block(self, block):
if block.is_revealed:
if not block.is_mine and not block.flagged:
self._draw_empty_block(block.row, block.col,
self.CLICKED_BLOCK_COLOR)
if block.mine_neighbor_count > 0:
self._draw_block_number(block)
elif block.is_mine and not block.flagged:
self._draw_mine(block)
elif block.flagged and not block.is_mine:
self._draw_mine(block)
self._draw_image(self.red_x_image, block)
else:
if block.flagged:
self._draw_image(self.flag_image, block)
elif not block.flagged:
self._draw_empty_block(block.row, block.col, self.BLOCK_COLOR)
def _draw_block_number(self, block):
text = self.block_font.render(str(block.mine_neighbor_count),
True, block.color)
self.screen.blit(text, [block.x + 7, block.y + 3])
def _draw_mine(self, block):
self._draw_image(self.mine_image, block)
def _draw_image(self, image, block):
self.screen.blit(image, (block.x, block.y, block.WIDTH, block.HEIGHT))
def _display_text(self, string, y_offset):
y0 = self.screen_size[1] - self.INFO_HEIGHT + y_offset
text = self.word_font.render(string, True, WHITE)
text_loc = self._get_centered_text(string, y0)
pygame.draw.rect(self.screen, BLACK, text_loc)
self.screen.blit(text, text_loc)
def _get_centered_text(self, string, y):
text = self.word_font.render(string, True, WHITE)
textpos = text.get_rect()
textpos.centerx = self.screen_size[0] // 2
textpos.centery = y
return textpos
def _display_time_counter(self, time):
y_offset = 40
self._display_counter("TIME: ", time, y_offset)
def _display_mine_counter(self, mine_count):
y_offset = 20
self._display_counter("MINES: ", mine_count, y_offset)
def _display_flag_counter(self, flag_count):
y_offset = 0
self._display_counter("FLAGS: ", flag_count, y_offset)
def _display_counter(self, prestring, count, y_offset):
x0 = 0
y0 = self.screen_size[1] - self.INFO_HEIGHT + y_offset
string = prestring + str(count)
text = self.word_font.render(string, True, WHITE)
text_size = self.word_font.size(string)
pygame.draw.rect(self.screen, self.BACKGROUND_COLOR,
(x0, y0, text_size[0], text_size[1]))
self.screen.blit(text, [x0, y0, text_size[0], text_size[1]])
| mpl-2.0 | 1,783,015,336,399,862,800 | 35.408046 | 78 | 0.576006 | false | 3.579096 | false | false | false |
datadreamer/research-chronology-revisited | cgi-bin/pydeliciouslibs/feedparser/feedparser.py | 2 | 121876 | #!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "4.0.2"# + "$Revision: 1.88 $"[11:15] + "-cvs"
__license__ = """Copyright (c) 2002-2005, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>"]
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# timeoutsocket allows feedparser to time out rather than hang forever on ultra-slow servers.
# Python 2.3 now has this functionality available in the standard socket library, so under
# 2.3 or later you don't need to install anything. In fact, under Python 2.4, timeoutsocket
# writes all sorts of crazy errors to stderr while running my unit tests, so it's probably
# outlived its usefulness.
import socket
if hasattr(socket, 'setdefaulttimeout'):
socket.setdefaulttimeout(20)
else:
try:
import timeoutsocket # http://www.timo-tasi.org/python/timeoutsocket.py
timeoutsocket.setDefaultSocketTimeout(20)
except ImportError:
pass
import urllib, urllib2
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]')
SUPPORTED_VERSIONS = {'': 'unknown',
'rss090': 'RSS 0.90',
'rss091n': 'RSS 0.91 (Netscape)',
'rss091u': 'RSS 0.91 (Userland)',
'rss092': 'RSS 0.92',
'rss093': 'RSS 0.93',
'rss094': 'RSS 0.94',
'rss20': 'RSS 2.0',
'rss10': 'RSS 1.0',
'rss': 'RSS (unknown version)',
'atom01': 'Atom 0.1',
'atom02': 'Atom 0.2',
'atom03': 'Atom 0.3',
'atom10': 'Atom 1.0',
'atom': 'Atom (unknown version)',
'cdf': 'CDF',
'hotrss': 'Hot RSS'
}
try:
UserDict = dict
except NameError:
# Python 2.1 does not have dict
from UserDict import UserDict
def dict(aList):
rc = {}
for k, v in aList:
rc[k] = v
return rc
class FeedParserDict(UserDict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['subtitle', 'summary'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
def __getitem__(self, key):
if key == 'category':
return UserDict.__getitem__(self, 'tags')[0]['term']
if key == 'categories':
return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
realkey = self.keymap.get(key, key)
if type(realkey) == types.ListType:
for k in realkey:
if UserDict.has_key(self, k):
return UserDict.__getitem__(self, k)
if UserDict.has_key(self, key):
return UserDict.__getitem__(self, key)
return UserDict.__getitem__(self, realkey)
def __setitem__(self, key, value):
for k in self.keymap.keys():
if key == k:
key = self.keymap[k]
if type(key) == types.ListType:
key = key[0]
return UserDict.__setitem__(self, key, value)
def get(self, key, default=None):
if self.has_key(key):
return self[key]
else:
return default
def setdefault(self, key, value):
if not self.has_key(key):
self[key] = value
return self[key]
def has_key(self, key):
try:
return hasattr(self, key) or UserDict.has_key(self, key)
except AttributeError:
return False
def __getattr__(self, key):
try:
return self.__dict__[key]
except KeyError:
pass
try:
assert not key.startswith('_')
return self.__getitem__(key)
except:
raise AttributeError, "object has no attribute '%s'" % key
def __setattr__(self, key, value):
if key.startswith('_') or key == 'data':
self.__dict__[key] = value
else:
return self.__setitem__(key, value)
def __contains__(self, key):
return self.has_key(key)
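# A short sketch of the alias behavior implemented by the keymap above
# (d stands for any FeedParserDict populated by the parser; which keys are
# present depends on the feed being parsed):
#
#     d['channel'] returns d['feed']     # legacy RSS name maps to 'feed'
#     d['items'] returns d['entries']    # legacy 'items' maps to 'entries'
#     d['url'] falls back to d['href']
#     d['category'] returns d['tags'][0]['term']
#     d.updated                          # attribute access uses the same lookup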
def zopeCompatibilityHack():
global FeedParserDict
del FeedParserDict
def FeedParserDict(aDict=None):
rc = {}
if aDict:
rc.update(aDict)
return rc
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
global _ebcdic_to_ascii_map
if not _ebcdic_to_ascii_map:
emap = (
0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
)
import string
_ebcdic_to_ascii_map = string.maketrans( \
''.join(map(chr, range(256))), ''.join(map(chr, emap)))
return s.translate(_ebcdic_to_ascii_map)
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
return urlparse.urljoin(base, uri)
class _FeedParserMixin:
namespaces = {'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf'
}
_matchnamespaces = {}
can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
html_types = ['text/html', 'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
if _debug: sys.stderr.write('initializing FeedParser\n')
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = '' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or ''
self.lang = baselang or None
if baselang:
self.feeddata['language'] = baselang
def unknown_starttag(self, tag, attrs):
if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
# normalize attrs
attrs = [(k.lower(), v) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
# Note: probably shouldn't simply recreate localname here, but
# our namespace handling isn't actually 100% correct in cases where
# the feed redefines the default namespace (which is actually
# the usual case for inline content, thanks Sam), so here we
# cheat and just reconstruct the element based on localname
# because that compensates for the bugs in our namespace handling.
# This will horribly munge inline content with non-empty qnames,
# but nobody actually does that, so I'm not fixing it.
tag = tag.split(':')[-1]
return self.handle_data('<%s%s>' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
return self.push(prefix + suffix, 1)
def unknown_endtag(self, tag):
if _debug: sys.stderr.write('end %s\n' % tag)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
if not self.elementstack: return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
if not self.elementstack: return
if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
else:
# entity resolution graciously donated by Aaron Swartz
def name2cp(k):
import htmlentitydefs
if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3
return htmlentitydefs.name2codepoint[k]
k = htmlentitydefs.entitydefs[k]
if k.startswith('&#') and k.endswith(';'):
return int(k[2:-1]) # not in latin-1
return ord(k)
try: name2cp(ref)
except KeyError: text = '&%s;' % ref
else: text = unichr(name2cp(ref)).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack: return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if _debug: sys.stderr.write('entering parse_declaration\n')
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1: k = len(self.rawdata)
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
return k+1
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
self.version = 'rss090'
if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
self.version = 'rss10'
if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
self.version = 'atom10'
        if loweruri.find('backend.userland.com/rss') != -1:
# match any backend.userland.com namespace
uri = 'http://backend.userland.com/rss'
loweruri = uri
if self._matchnamespaces.has_key(loweruri):
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or '', uri)
def decodeEntities(self, element, data):
return data
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack: return
if self.elementstack[-1][0] != element: return
element, expectingText, pieces = self.elementstack.pop()
output = ''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText: return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = base64.decodestring(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
# resolve relative URIs within embedded markup
if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding)
# sanitize embedded markup
if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding)
if self.encoding and type(output) != type(u''):
try:
output = unicode(output, self.encoding)
except:
pass
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
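    # Illustrative flow for pop('summary') on an entry (hypothetical feed): the collected
    # text pieces are joined and stripped, base64-decoded when the content mode asked for
    # it, relative URIs inside the embedded markup are resolved against self.baseuri,
    # dangerous markup is sanitized, and the value ends up in self.entries[-1]['summary']
    # (plus a parallel 'summary_detail' dict while self.incontent is set).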
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
        if colonpos != -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value):
context = self._getContext()
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
if not self.version:
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
def _start_dlhottitles(self, attrsD):
self.version = 'hotrss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
_start_feedinfo = _start_channel
def _cdf_common(self, attrsD):
if attrsD.has_key('lastmod'):
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if attrsD.has_key('href'):
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': 'atom01',
'0.2': 'atom02',
'0.3': 'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
self.inimage = 1
self.push('image', 0)
context = self._getContext()
context.setdefault('image', FeedParserDict())
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
self.intextinput = 1
self.push('textinput', 0)
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['textinput']['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
elif self.inimage:
context = self._getContext()
context['image']['href'] = value
elif self.intextinput:
context = self._getContext()
context['textinput']['link'] = value
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = '%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author = context.get(key)
if not author: return
emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author)
if not emailmatch: return
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, '')
author = author.replace('()', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
context.setdefault('%s_detail' % key, FeedParserDict())
context['%s_detail' % key]['name'] = author
context['%s_detail' % key]['email'] = email
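    # Example (hypothetical values): an RSS <author>mark@example.org (Mark Pilgrim)</author>
    # is split into author_detail = {'name': 'Mark Pilgrim', 'email': 'mark@example.org'};
    # conversely, a populated author_detail is flattened back into
    # context['author'] = 'Mark Pilgrim (mark@example.org)'.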
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, 'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, 'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
_start_product = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value))
_end_dcterms_issued = _end_published
_end_issued = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_pubdate = _start_updated
_start_dc_date = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_pubdate = _end_updated
_end_dc_date = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value))
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')))
def _start_cc_license(self, attrsD):
self.push('license', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('license')
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
def _end_creativecommons_license(self):
self.pop('license')
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label): return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label}))
def _start_category(self, attrsD):
if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split():
self._addTag(term, 'http://www.itunes.com/', None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value: return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', 'alternate')
attrsD.setdefault('type', 'text/html')
attrsD = self._itsAnHrefDamnIt(attrsD)
if attrsD.has_key('href'):
attrsD['href'] = self.resolveURI(attrsD['href'])
expectingText = self.infeed or self.inentry or self.insource
context = self._getContext()
context.setdefault('links', [])
context['links'].append(FeedParserDict(attrsD))
if attrsD['rel'] == 'enclosure':
self._start_enclosure(attrsD)
if attrsD.has_key('href'):
expectingText = 0
if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
_start_producturl = _start_link
def _end_link(self):
value = self.pop('link')
context = self._getContext()
if self.intextinput:
context['textinput']['link'] = value
if self.inimage:
context['image']['link'] = value
_end_producturl = _end_link
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
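    # Example (hypothetical values): <guid isPermaLink="true">http://example.org/post/1</guid>
    # sets entry['id'] and, if the item has no <link> of its own, entry['link'] as well;
    # with isPermaLink="false" the value is kept only as the entry's id.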
def _start_title(self, attrsD):
self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
value = self.popContent('title')
context = self._getContext()
if self.intextinput:
context['textinput']['title'] = value
elif self.inimage:
context['image']['title'] = value
_end_dc_title = _end_title
_end_media_title = _end_title
def _start_description(self, attrsD):
context = self._getContext()
if context.has_key('summary'):
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
context = self._getContext()
if self.intextinput:
context['textinput']['description'] = value
elif self.inimage:
context['image']['description'] = value
self._summaryKey = None
_end_abstract = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, 'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if attrsD.has_key('href'):
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if context.has_key('generator_detail'):
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if context.has_key('summary'):
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD))
href = attrsD.get('href')
if href:
context = self._getContext()
if not context.get('id'):
context['id'] = href
def _start_source(self, attrsD):
self.insource = 1
def _end_source(self):
self.insource = 0
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, 'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_prodlink(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
value = self.popContent('content')
if copyToDescription:
self._save('description', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
_end_prodlink = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
if _debug: sys.stderr.write('trying StrictFeedParser\n')
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
def startPrefixMapping(self, prefix, uri):
self.trackNamespace(prefix, uri)
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
            if lowernamespace.find('backend.userland.com/rss') != -1:
# match any backend.userland.com namespace
namespace = 'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
if prefix:
localname = prefix + ':' + localname
localname = str(localname).lower()
if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
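            # Example (illustrative): name = ('http://purl.org/dc/elements/1.1/', 'creator')
            # with qname 'dc:creator' resolves through _matchnamespaces to the canonical
            # prefix 'dc', so the element is dispatched below as 'dc:creator' (and thus to
            # _start_dc_creator) regardless of which prefix the feed actually declared.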
attrsD = {}
for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
self.unknown_starttag(localname, attrsD.items())
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
'img', 'input', 'isindex', 'link', 'meta', 'param']
def __init__(self, encoding):
self.encoding = encoding
if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
def feed(self, data):
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
if self.encoding and type(data) == type(u''):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
def normalize_attrs(self, attrs):
# utility method to be called by descendants
attrs = [(k.lower(), v) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
return attrs
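    # Example (illustrative values): normalize_attrs([('HREF', 'a.html'), ('Rel', 'ALTERNATE')])
    # returns [('href', 'a.html'), ('rel', 'alternate')]: keys are lowercased, and values are
    # lowercased only for the 'rel' and 'type' attributes.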
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
uattrs = []
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
for key, value in attrs:
if type(value) != type(u''):
value = unicode(value, self.encoding)
uattrs.append((unicode(key, self.encoding), value))
strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
if tag in self.elements_no_end_tag:
self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
else:
self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%(tag)s>" % locals())
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
# Reconstruct the original character reference.
self.pieces.append('&#%(ref)s;' % locals())
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
# Reconstruct the original entity reference.
self.pieces.append('&%(ref)s;' % locals())
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%(text)s-->' % locals())
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%(text)s>' % locals())
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%(text)s>' % locals())
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
return data
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = [('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src')]
def __init__(self, baseuri, encoding):
_BaseHTMLProcessor.__init__(self, encoding)
self.baseuri = baseuri
def resolveURI(self, uri):
return _urljoin(self.baseuri, uri)
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding):
if _debug: sys.stderr.write('entering _resolveRelativeURIs\n')
p = _RelativeURIResolver(baseURI, encoding)
p.feed(htmlSource)
return p.output()
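# Illustrative use (hypothetical values):
#   _resolveRelativeURIs('<a href="about/">docs</a>', 'http://example.org/feed/', 'utf-8')
# returns '<a href="http://example.org/feed/about/">docs</a>', rewriting every attribute
# listed in _RelativeURIResolver.relative_uris against the supplied base URI.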
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
'thead', 'tr', 'tt', 'u', 'ul', 'var']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
'usemap', 'valign', 'value', 'vspace', 'width']
unacceptable_elements_with_end_tag = ['script', 'applet']
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
def unknown_starttag(self, tag, attrs):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
return
attrs = self.normalize_attrs(attrs)
attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def _sanitizeHTML(htmlSource, encoding):
p = _HTMLSanitizer(encoding)
p.feed(htmlSource)
data = p.output()
if TIDY_MARKUP:
# loop through list of preferred Tidy interfaces looking for one that's installed,
# then set up a common _tidy function to wrap the interface-specific API.
_tidy = None
for tidy_interface in PREFERRED_TIDY_INTERFACES:
try:
if tidy_interface == "uTidy":
from tidy import parseString as _utidy
def _tidy(data, **kwargs):
return str(_utidy(data, **kwargs))
break
elif tidy_interface == "mxTidy":
from mx.Tidy import Tidy as _mxtidy
def _tidy(data, **kwargs):
nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
return data
break
except:
pass
if _tidy:
utf8 = type(data) == type(u'')
if utf8:
data = data.encode('utf-8')
data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
if utf8:
data = unicode(data, 'utf-8')
if data.count('<body'):
data = data.split('<body', 1)[1]
if data.count('>'):
data = data.split('>', 1)[1]
if data.count('</body'):
data = data.split('</body', 1)[0]
data = data.strip().replace('\r\n', '\n')
return data
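# Illustrative use (hypothetical values):
#   _sanitizeHTML('<p onclick="evil()">hi<script>alert(1)</script></p>', 'utf-8')
# keeps the <p> element but drops the onclick attribute and the whole <script> element,
# returning '<p>hi</p>' (optionally tidied when TIDY_MARKUP is enabled).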
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
if ((code / 100) == 3) and (code != 304):
return self.http_error_302(req, fp, code, msg, headers)
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
infourl.status = code
return infourl
def http_error_302(self, req, fp, code, msg, headers):
if headers.dict.has_key('location'):
infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
else:
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
if not hasattr(infourl, 'status'):
infourl.status = code
return infourl
def http_error_301(self, req, fp, code, msg, headers):
if headers.dict.has_key('location'):
infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
else:
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
if not hasattr(infourl, 'status'):
infourl.status = code
return infourl
http_error_300 = http_error_302
http_error_303 = http_error_302
http_error_307 = http_error_302
def http_error_401(self, req, fp, code, msg, headers):
# Check if
# - server requires digest auth, AND
# - we tried (unsuccessfully) with basic auth, AND
# - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
# If all conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urlparse.urlparse(req.get_full_url())[1]
try:
assert sys.version.split()[0] >= '2.3.3'
assert base64 != None
user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
except:
return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it must be a tuple of 9 integers
as returned by gmtime() in the standard Python time module. This MUST
be in GMT (Greenwich Mean Time). The formatted date/time will be used
as the value of an If-Modified-Since request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
if url_file_stream_or_string == '-':
return sys.stdin
if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
if not agent:
agent = USER_AGENT
# test for inline user:password for basic auth
auth = None
if base64:
urltype, rest = urllib.splittype(url_file_stream_or_string)
realhost, rest = urllib.splithost(rest)
if realhost:
user_passwd, realhost = urllib.splituser(realhost)
if user_passwd:
url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
auth = base64.encodestring(user_passwd).strip()
# try to open with urllib2 (to use optional headers)
request = urllib2.Request(url_file_stream_or_string)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
if referrer:
request.add_header('Referer', referrer)
if gzip and zlib:
request.add_header('Accept-encoding', 'gzip, deflate')
elif gzip:
request.add_header('Accept-encoding', 'gzip')
elif zlib:
request.add_header('Accept-encoding', 'deflate')
else:
request.add_header('Accept-encoding', '')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
request.add_header('A-IM', 'feed') # RFC 3229 support
opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string)
except:
pass
# treat url_file_stream_or_string as string
return _StringIO(str(url_file_stream_or_string))
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
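# Illustrative use (hypothetical handler): callers can prepend their own parser, e.g.
#   def _parse_date_epoch(dateString):
#       return time.gmtime(int(dateString))
#   registerDateHandler(_parse_date_epoch)
# Handlers are tried newest-first by _parse_date() and must return a 9-tuple in GMT.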
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance, is 030104 2003-01-04 or
# 0301-04-01?), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
'''Parse a variety of ISO-8601-compatible formats like 20040105'''
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m: break
if not m: return
if m.span() == (0, 0): return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if 'century' in params.keys():
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(params.get('second', 0))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
# daylight savings is complex, but not needed for feedparser's purposes
# as time zones, if specified, include mention of whether it is active
# (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and
# and most implementations have DST bugs
daylight_savings_flag = 0
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
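# Example (illustrative): the templates above let _parse_date_iso8601 accept compact dates
# like '20031231', ordinal dates like '2003-335', and full timestamps such as
# '2003-12-31T10:14:55+02:00', all normalized through mktime() into a 9-tuple.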
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m: return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m: return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
_mssql_date_re = \
re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
'''Parse a string according to the MS SQL date format'''
m = _mssql_date_re.match(dateString)
if not m: return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
_greek_months = \
{ \
u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m: return
try:
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
except:
return
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
if not m: return
try:
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
except:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
def __extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
def __extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = int(seconds)
else:
seconds = 0
return hours, minutes, seconds
def __extract_tzd(m):
'''Return the Time Zone Designator as an offset in seconds from UTC.'''
if not m:
return 0
tzd = m.group('tzd')
if not tzd:
return 0
if tzd == 'Z':
return 0
hours = int(m.group('tzdhours'))
minutes = m.group('tzdminutes')
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == '+':
return -offset
return offset
__date_re = ('(?P<year>\d\d\d\d)'
'(?:(?P<dsep>-|)'
'(?:(?P<julian>\d\d\d)'
'|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
__tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
__tzd_rx = re.compile(__tzd_re)
__time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
'(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
+ __tzd_re)
__datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
m = __datetime_rx.match(dateString)
if (m is None) or (m.group() != dateString): return
gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
if gmt[0] == 0: return
return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
'''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
data = dateString.split()
if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
del data[0]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i > 0:
data[3:] = [s[:i], s[i+1:]]
else:
data.append('')
dateString = " ".join(data)
if len(data) < 5:
dateString += ' 00:00:00 GMT'
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
if not date9tuple: continue
if len(date9tuple) != 9:
if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
raise ValueError
map(int, date9tuple)
return date9tuple
except Exception, e:
if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
pass
return None
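# Example (illustrative): _parse_date('Thu, 01 Jan 2004 19:48:21 GMT') and
# _parse_date('2004-01-01T19:48:21+00:00') both yield the same 9-tuple in GMT,
# courtesy of the RFC 822 and W3DTF handlers registered above.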
def _getCharacterEncoding(http_headers, xml_data):
'''Get the character encoding of the XML document
http_headers is a dictionary
xml_data is a raw string (not Unicode)
This is so much trickier than it sounds, it's not even funny.
According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
is application/xml, application/*+xml,
application/xml-external-parsed-entity, or application/xml-dtd,
the encoding given in the charset parameter of the HTTP Content-Type
takes precedence over the encoding given in the XML prefix within the
document, and defaults to 'utf-8' if neither are specified. But, if
the HTTP Content-Type is text/xml, text/*+xml, or
text/xml-external-parsed-entity, the encoding given in the XML prefix
within the document is ALWAYS IGNORED and only the encoding given in
the charset parameter of the HTTP Content-Type header should be
respected, and it defaults to 'us-ascii' if not specified.
Furthermore, discussion on the atom-syntax mailing list with the
author of RFC 3023 leads me to the conclusion that any document
served with a Content-Type of text/* and no charset parameter
must be treated as us-ascii. (We now do this.) And also that it
must always be flagged as non-well-formed. (We now do this too.)
If Content-Type is unspecified (input was local file or non-HTTP source)
or unrecognized (server just got it totally wrong), then go by the
encoding given in the XML prefix of the document and default to
'iso-8859-1' as per the HTTP specification (RFC 2616).
Then, assuming we didn't find a character encoding in the HTTP headers
(and the HTTP Content-type allowed us to look in the body), we need
to sniff the first few bytes of the XML data and try to determine
whether the encoding is ASCII-compatible. Section F of the XML
specification shows the way here:
http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
If the sniffed encoding is not ASCII-compatible, we need to make it
ASCII compatible so that we can sniff further into the XML declaration
to find the encoding attribute, which will tell us the true encoding.
Of course, none of this guarantees that we will be able to parse the
feed in the declared character encoding (assuming it was declared
correctly, which many are not). CJKCodecs and iconv_codec help a lot;
you should definitely install them if you can.
http://cjkpython.i18n.org/
'''
def _parseHTTPContentType(content_type):
'''takes HTTP Content-Type header and returns (content type, charset)
If no charset is specified, returns (content type, '')
If no content type is specified, returns ('', '')
Both return parameters are guaranteed to be lowercase strings
'''
content_type = content_type or ''
content_type, params = cgi.parse_header(content_type)
return content_type, params.get('charset', '').replace("'", '')
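    # Illustrative values (hypothetical, relying only on cgi.parse_header):
    #   _parseHTTPContentType('text/xml; charset=utf-8') -> ('text/xml', 'utf-8')
    #   _parseHTTPContentType(None)                      -> ('', '')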
sniffed_xml_encoding = ''
xml_encoding = ''
true_encoding = ''
http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
# Must sniff for non-ASCII-compatible character encodings before
# searching for XML declaration. This heuristic is defined in
# section F of the XML specification:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = _ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
# ASCII-compatible
pass
xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
except:
xml_encoding_match = None
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].lower()
if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
acceptable_content_type = 0
application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
acceptable_content_type = 1
true_encoding = http_encoding or xml_encoding or 'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
acceptable_content_type = 1
true_encoding = http_encoding or 'us-ascii'
elif http_content_type.startswith('text/'):
true_encoding = http_encoding or 'us-ascii'
elif http_headers and (not http_headers.has_key('content-type')):
true_encoding = xml_encoding or 'iso-8859-1'
else:
true_encoding = xml_encoding or 'utf-8'
return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
def _toUTF8(data, encoding):
'''Changes an XML data stream on the fly to specify a new encoding
data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
encoding is a string recognized by encodings.aliases
'''
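    # Sketch of the effect (hypothetical input): given iso-8859-1 bytes whose prolog is
    #   <?xml version='1.0' encoding='iso-8859-1'?>
    # this returns the same document as UTF-8 bytes with the declaration rewritten
    # to encoding='utf-8'.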
if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-16be':
sys.stderr.write('trying utf-16be instead\n')
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-16le':
sys.stderr.write('trying utf-16le instead\n')
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-8':
sys.stderr.write('trying utf-8 instead\n')
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-32be':
sys.stderr.write('trying utf-32be instead\n')
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-32le':
sys.stderr.write('trying utf-32le instead\n')
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
declmatch = re.compile('^<\?xml[^>]*?>')
newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
if declmatch.search(newdata):
newdata = declmatch.sub(newdecl, newdata)
else:
newdata = newdecl + u'\n' + newdata
return newdata.encode('utf-8')
def _stripDoctype(data):
'''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document, minus the DOCTYPE
'''
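    # Example (hypothetical input): a document whose prolog contains
    #   <!DOCTYPE rss PUBLIC "-//Netscape Communications//DTD RSS 0.91//EN" ...>
    # comes back as ('rss091n', data_without_the_DOCTYPE); inline <!ENTITY ...>
    # declarations are stripped as well.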
entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
data = entity_pattern.sub('', data)
doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
doctype_results = doctype_pattern.findall(data)
doctype = doctype_results and doctype_results[0] or ''
if doctype.lower().count('netscape'):
version = 'rss091n'
else:
version = None
data = doctype_pattern.sub('', data)
return version, data
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
'''Parse a feed from a URL, file, stream, or string'''
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
if _XML_AVAILABLE:
result['bozo'] = 0
if type(handlers) == types.InstanceType:
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
data = f.read()
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
f = None
# if feed is gzip-compressed, decompress it
if f and data and hasattr(f, 'headers'):
if gzip and f.headers.get('content-encoding', '') == 'gzip':
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except Exception, e:
# Some feeds claim to be gzipped but they're not, so
# we get garbage. Ideally, we should re-request the
# feed without the 'Accept-encoding: gzip' header,
# but we don't.
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
elif zlib and f.headers.get('content-encoding', '') == 'deflate':
try:
data = zlib.decompress(data, -zlib.MAX_WBITS)
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
# save HTTP headers
if hasattr(f, 'info'):
info = f.info()
result['etag'] = info.getheader('ETag')
last_modified = info.getheader('Last-Modified')
if last_modified:
result['modified'] = _parse_date(last_modified)
if hasattr(f, 'url'):
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'headers'):
result['headers'] = f.headers.dict
if hasattr(f, 'close'):
f.close()
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
http_headers = result.get('headers', {})
result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
_getCharacterEncoding(http_headers, data)
if http_headers and (not acceptable_content_type):
if http_headers.has_key('content-type'):
bozo_message = '%s is not an XML media type' % http_headers['content-type']
else:
bozo_message = 'no Content-type specified'
result['bozo'] = 1
result['bozo_exception'] = NonXMLContentType(bozo_message)
result['version'], data = _stripDoctype(data)
baseuri = http_headers.get('content-location', result.get('href'))
baselang = http_headers.get('content-language', None)
# if server sent 304, we're done
if result.get('status', 0) == 304:
result['version'] = ''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
# if there was a problem downloading, we're done
if not data:
return result
# determine character encoding
use_strict_parser = 0
known_encoding = 0
tried_encodings = []
for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding, 'utf-8', 'windows-1252'):
if proposed_encoding in tried_encodings: continue
if not proposed_encoding: continue
try:
data = _toUTF8(data, proposed_encoding)
known_encoding = 1
use_strict_parser = 1
break
except:
pass
tried_encodings.append(proposed_encoding)
if not known_encoding:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingUnknown( \
'document encoding unknown, I tried ' + \
'%s, %s, utf-8, and windows-1252 but nothing worked' % \
(result['encoding'], xml_encoding))
result['encoding'] = ''
elif proposed_encoding != result['encoding']:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingOverride( \
                'document declared as %s, but parsed as %s' % \
(result['encoding'], proposed_encoding))
result['encoding'] = proposed_encoding
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
if hasattr(saxparser, '_ns_stack'):
# work around bug in built-in SAX parser (doesn't recognize xml: namespace)
# PyXML doesn't have this problem, and it doesn't have _ns_stack either
saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
try:
saxparser.parse(source)
except Exception, e:
if _debug:
import traceback
traceback.print_stack()
traceback.print_exc()
sys.stderr.write('xml parsing failed\n')
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser:
feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '')
feedparser.feed(data)
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
if __name__ == '__main__':
if not sys.argv[1:]:
print __doc__
sys.exit(0)
else:
urls = sys.argv[1:]
zopeCompatibilityHack()
from pprint import pprint
for url in urls:
print url
print
result = parse(url)
pprint(result)
print
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
# added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
# JD - use inchannel to watch out for image and textinput elements which can
# also contain title, link, and description elements
# JD - check for isPermaLink='false' attribute on guid elements
# JD - replaced openAnything with open_resource supporting ETag and
# If-Modified-Since request headers
# JD - parse now accepts etag, modified, agent, and referrer optional
# arguments
# JD - modified parse to return a dictionary instead of a tuple so that any
# etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
# because of etag/modified, return the old etag/modified to the caller to
# indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its
# useless. Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
# start_admingeneratoragent is an example of how to handle elements with
# only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
# also, make sure we send the User-Agent even if urllib2 isn't available.
# Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
# snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
# project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
# removed unnecessary urllib code -- urllib2 should always be available anyway;
# return actual url, status, and full HTTP headers (as result['url'],
# result['status'], and result['headers']) if parsing a remote feed over HTTP --
# this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
# added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
# User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
# inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
# textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
# nested divs within content (JohnD); fixed missing sys import (JohanS);
# fixed regular expression to capture XML character encoding (Andrei);
# added support for Atom 0.3-style links; fixed bug with textInput tracking;
# added support for cloud (MartijnP); added support for multiple
# category/dc:subject (MartijnP); normalize content model: 'description' gets
# description (which can come from description, summary, or full content if no
# description), 'content' gets dict of base/language/type/value (which can come
# from content:encoded, xhtml:body, content, or fullitem);
# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
# <content> element is not in default namespace (like Pocketsoap feed);
# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
# description, xhtml:body, content, content:encoded, title, subtitle,
# summary, info, tagline, and copyright; added support for pingback and
# trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
# namespaces, as opposed to 2.6 when I said I did but didn't really;
# sanitize HTML markup within some elements; added mxTidy support (if
# installed) to tidy HTML markup within some elements; fixed indentation
# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
# (FazalM); universal date parsing and normalization (FazalM): 'created', modified',
# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling " and '. fixed memory
# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
# fixed relative URI processing for guid (skadz); added ICBM support; added
# base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
# blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
# added several new supported namespaces; fixed bug tracking naked markup in
# description; added support for enclosure; added support for source; re-added
# support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
# xml:base URI, one for documents that don't define one explicitly and one for
# documents that define an outer and an inner xml:base that goes out of scope
# before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
# added support for creativeCommons:license and cc:license; added support for
# full Atom content model in title, tagline, info, copyright, summary; fixed bug
# with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
# contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
# support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
# xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
# dangerous markup; fiddled with decodeEntities (not right); liberalized
# date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
# added support to Atom 0.2 subtitle; added support for Atom content model
# in copyright; better sanitizing of dangerous HTML elements with end tags
# (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
# Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
# fixed bug capturing author and contributor URL; fixed bug resolving relative
#  links in author and contributor URL; fixed bug resolving relative links in
# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
# namespace tests, and included them permanently in the test suite with his
# permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
# use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
# name was in parentheses; removed ultra-problematic mxTidy support; patch to
# workaround crash in PyXML/expat when encountering invalid entities
# (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
# results dict; changed results dict to allow getting values with results.key
# as well as results[key]; work around embedded illformed HTML with half
# a DOCTYPE; work around malformed Content-Type header; if character encoding
# is wrong, try several common ones before falling back to regexes (if this
# works, bozo_exception is set to CharacterEncodingOverride); fixed character
# encoding issues in BaseHTMLProcessor by tracking encoding and converting
# from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
# convert each value in results to Unicode (if possible), even if using
# regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
# high-bit characters in attributes in embedded HTML in description (thanks
# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
# about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
# cause the same encoding to be tried twice (even if it failed the first time);
# fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
# better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
# my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
# failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
# added support for image; refactored parse() fallback logic to try other
# encodings if SAX parsing fails (previously it would only try other encodings
# if re-encoding failed); remove unichr madness in normalize_attrs now that
# we're properly tracking encoding in and out of BaseHTMLProcessor; set
# feed.language from root-level xml:lang; set entry.id from rdf:about;
# send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
# windows-1252); fixed regression that could cause the same encoding to be
# tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
# recover from malformed content-type header parameter with no equals sign
# ('text/xml; charset:iso-8859-1')
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
# to Unicode equivalents in illformed feeds (aaronsw); added and
# passed tests for converting character entities to Unicode equivalents
# in illformed feeds (aaronsw); test for valid parsers when setting
# XML_AVAILABLE; make version and encoding available when server returns
# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
# digest auth or proxy support); add code to parse username/password
# out of url and send as basic authentication; expose downloading-related
# exceptions in bozo_exception (aaronsw); added __contains__ method to
# FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
# convert feed to UTF-8 before passing to XML parser; completely revamped
# logic for determining character encoding and attempting XML parsing
# (much faster); increased default timeout to 20 seconds; test for presence
# of Location header on redirects; added tests for many alternate character
# encodings; support various EBCDIC encodings; support UTF-16BE and
# UTF16-LE with or without a BOM; support UTF-8 with a BOM; support
# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
# XML parsers are available; added support for 'Content-encoding: deflate';
# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
# are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
# problem tracking xml:base and xml:lang if element declares it, child
# doesn't, first grandchild redeclares it, and second grandchild doesn't;
# refactored date parsing; defined public registerDateHandler so callers
# can add support for additional date formats at runtime; added support
# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
# zopeCompatibilityHack() which turns FeedParserDict into a regular
# dictionary, required for Zope compatibility, and also makes command-
# line debugging easier because pprint module formats real dictionaries
# better than dictionary-like objects; added NonXMLContentType exception,
# which is stored in bozo_exception when a feed is served with a non-XML
# media type such as 'text/plain'; respect Content-Language as default
# language if not xml:lang is present; cloud dict is now FeedParserDict;
# generator dict is now FeedParserDict; better tracking of xml:lang,
# including support for xml:lang='' to unset the current language;
# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
# namespace; don't overwrite final status on redirects (scenarios:
# redirecting to a URL that returns 304, redirecting to a URL that
# redirects to another URL with a different type of redirect); add
# support for HTTP 303 redirects
#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
# support for Atom 1.0; support for iTunes extensions; new 'tags' for
# categories/keywords/etc. as array of dict
# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
# terminology; parse RFC 822-style dates with no time; lots of other
# bug fixes
| mit | -7,598,199,181,045,882,000 | 42.02012 | 214 | 0.587925 | false | 3.606439 | false | false | false |
jhmatthews/cobra | source/plotspec.py | 1 | 2652 | #! /Library/Frameworks/Python.framework/Versions/2.7/Resources/Python.app/Contents/MacOS/Python
'''
Generic plotting script for PYTHONRT
'''
import read_output as rd
import os, sys
import matplotlib.pyplot as plt
import numpy as np
import cobra_sub as sub
rd.setpars()
def strip(character, string):
''' strip a character from a string'''
new_string = ""
for s in string:
if s != character:
new_string += s
return new_string
def plot_spec (filename, lmin, lmax, smooth = 1, nobs = 0, use = [], \
savename = "fig", yscale = "linear", xscale = "linear" , \
sources = False, Fnu = False):
'''
Function for plotting a spec file outputted from the radiative transfer code PYTHONRT
:INPUT:
filename string
name of file
lmin, lmax float
wavelength range in ANGSTROMS
nobs int
        number of observations
smooth int
smoothing factor
use array
which observations to use
savename string
yscale, xscale string
lin or log scale
sources Bool
Plot sources or not
Fnu Bool
Is it an Fnu plot?
:OUTPUT:
Creates plot and opens in preview
'''
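  # Minimal usage sketch (file name and observation count are hypothetical):
  #   plot_spec("run01.spec", 3000, 7000, smooth=20, nobs=4, yscale="log")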
# default savename is filename
if savename == "fig":
savename = filename + ".png"
# create spec class from spec file
spec = rd.read_spec_file(filename)
if nobs == 0:
nobs = len(spec.spec)
# strip filenames of funny characters that TeX complains about
savename = strip("_", savename)
filename = strip("_", filename)
# default argument is to plot all observations
if len(use) == 0:
use = np.arange(nobs)
nuse = int(len(use))
# work out the dimensions of the plot
if nuse < 3:
ny = nuse
nx = 1
else:
nx = 2
ny = (len(use) + 1) / 2
# do we want to smooth? if so, do it!
if smooth > 1:
for i in use:
sub.smooth_spectrum( spec, smooth )
# now create figure
fig=plt.figure(figsize=(8.3,11.7),dpi=80)
fig.suptitle(filename,fontsize=24,fontweight='bold')
fig.subplots_adjust(hspace=0.3,wspace=0.2)
for i in range(nuse):
    ax = fig.add_subplot(ny, nx, i + 1)  # subplot numbering is 1-based in matplotlib
if Fnu:
ax.plot(spec.freq, spec.spec[use[i]])
else:
ax.plot(spec.wavelength, spec.spec[use[i]])
ax.set_yscale(yscale)
ax.set_xscale(xscale)
plt.xlim(lmin, lmax)
plt.savefig(savename)
command = "open -a preview %s" % savename
os.system(command)
if sources:
fig=plt.figure(figsize=(8.3,11.7),dpi=80)
fig.suptitle(filename,fontsize=24,fontweight='bold')
fig.subplots_adjust(hspace=0.3,wspace=0.2)
return 0
filename = sys.argv[1]
nobs = int(sys.argv[2])
plot_spec(filename, 3000, 7000, smooth = 20, yscale = "log")
| gpl-2.0 | -2,380,292,950,757,573,600 | 16 | 95 | 0.647436 | false | 2.830309 | false | false | false |
newcastlemakerspace/mkrspc_web | data_migration.py | 1 | 2090 | import redis
import datetime
from site_config import REDIS_DB
import site_utils
import uuid
def _was_migration_applied(redis_conn, seq):
value = redis_conn.get('migration_%d' % seq)
if value is not None:
print "migration_%d - exists" % seq
return True
print "migration_%d - executing" % seq
return False
def _flag_migration_applied(redis_conn, seq):
print "migration_%d - done" % seq
# migration_201410241041
d = datetime.datetime
redis_conn.set('migration_%d' % seq, d.now().isoformat())
def migration_201410241041(redis_conn):
seq = 201410241041
if _was_migration_applied(redis_conn, seq):
return
print " - clear old auth cookies"
key_prefix_search = 'User_Auth_Cookie_*'
keys = redis_conn.keys(key_prefix_search)
for k in keys:
redis_conn.delete(k)
_flag_migration_applied(redis_conn, seq)
def migration_201411130948(redis_conn):
# wiki categories
seq = 201411130948
if _was_migration_applied(redis_conn, seq):
return
print " - re-init wiki"
su = site_utils.SiteUtils(redis_conn)
su.wu.create_wiki_root_category()
root_cat_id = su.wu.wiki_root_category()
misc_cat_id = su.wu.create_wiki_category(root_cat_id, "Misc.")
article_keys = redis_conn.keys('wiki_article_*')
for k in article_keys:
print k, len(k)
print k[13:]
if len(k) == 49:
uuid_sstr = k[13:]
art_id = uuid.UUID(uuid_sstr)
assert isinstance(art_id, uuid.UUID)
print " article: ", misc_cat_id, art_id
cat_articles_key = "wiki_category_articles_%s" % misc_cat_id
            redis_conn.rpush(cat_articles_key, str(art_id))  # use the passed-in connection, not the module-level "r"
else:
print " (not an article)"
print '-----------------------------'
_flag_migration_applied(redis_conn, seq)
if __name__ == '__main__':
print "Ruinning migrations for DB #%d" % REDIS_DB
r = redis.Redis(db=REDIS_DB)
assert isinstance(r, redis.Redis)
migration_201410241041(r)
migration_201411130948(r)
| gpl-3.0 | 2,701,100,883,988,187,000 | 23.022989 | 72 | 0.606699 | false | 3.328025 | false | false | false |
RyanBalfanz/reservoir-sampling-cli | sampler/command_line.py | 1 | 1294 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import operator
import random
import sys
def get_parser():
parser = argparse.ArgumentParser("Randomly sample k items from an input S containing n items.")
parser.add_argument("infile", nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument("outfile", nargs='?', type=argparse.FileType('w'), default=sys.stdout)
parser.add_argument("-k", "--num-items", type=int, help="An integer number giving the size of the reservoir")
parser.add_argument("--preserve-order", action="store_true", help="Preserve input ordering")
return parser
def main(argv=None):
parser = get_parser()
args = parser.parse_args(argv)
N = args.num_items
reservoir = []
reservoir_ordered = []
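    # Reservoir sampling (Algorithm R) sketch: keep the first N lines, then give
    # line l (0-based) an N/(l+1) chance of evicting a random reservoir slot, so
    # every input line ends up in the sample with equal probability.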
for l, line in enumerate(args.infile):
if l < N:
reservoir.append(line)
reservoir_ordered.append((l, line))
elif l >= N and random.random() < N/float(l+1):
replace = random.randint(0, len(reservoir)-1)
reservoir[replace] = line
reservoir_ordered[replace] = (l, line)
if args.preserve_order:
        for item in sorted(reservoir_ordered, key=operator.itemgetter(0)):  # sort by stored line index to preserve input order
args.outfile.write(item[1])
else:
for item in reservoir:
args.outfile.write(item)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| mit | 1,964,912,365,201,834,000 | 28.409091 | 110 | 0.693972 | false | 3.066351 | false | false | false |
aldryn/aldryn-redirects | aldryn_redirects/admin.py | 1 | 8613 | from __future__ import unicode_literals
from tablib import Dataset
from django.conf import settings
from django.contrib import admin, messages
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _, ugettext
from parler.admin import TranslatableAdmin
from aldryn_translation_tools.admin import AllTranslationsMixin
from .forms import RedirectsImportForm, StaticRedirectsImportForm
from .models import Redirect, StaticRedirect, StaticRedirectInboundRouteQueryParam
class DeletionMixin(object):
actions = ['delete_selected']
def delete_selected(self, request, queryset):
max_items_deletion = getattr(settings, 'DATA_UPLOAD_MAX_NUMBER_FIELDS', 1000) # COMPAT: Django<1.10
if queryset.count() > max_items_deletion:
msg = _('Too many items for deletion. Only first {qty} items were deleted.').format(qty=max_items_deletion)
self.message_user(request, msg, level=messages.WARNING)
# <Queryset>.delete() can not be used with sliced querysets
inner_qs = queryset.all()[:max_items_deletion]
queryset = queryset.filter(id__in=inner_qs)
deleted_qty = queryset.all().delete()[1]['aldryn_redirects.{}'.format(self.opts.model.__name__)]
object_label = self.opts.verbose_name_plural if deleted_qty > 1 else self.opts.verbose_name
msg = _('Successfully deleted {qty} {object_label}.').format(qty=deleted_qty, object_label=object_label)
self.message_user(request, msg)
delete_selected.short_description = _('Delete selected objects')
class RedirectAdmin(DeletionMixin, AllTranslationsMixin, TranslatableAdmin):
list_display = ('old_path',)
list_filter = ('site',)
search_fields = ('old_path', 'translations__new_path')
radio_fields = {'site': admin.VERTICAL}
export_filename = 'redirects-%Y-%m-%d.csv'
export_headers = ['Domain', 'Old', 'New', 'Language']
def get_urls(self):
from django.conf.urls import url
def pattern(regex, fn, name):
args = [regex, self.admin_site.admin_view(fn)]
url_name = "%s_%s_%s" % (self.opts.app_label, self.opts.model_name, name)
return url(*args, name=url_name)
url_patterns = [
pattern(r'export/$', self.export_view, 'export'),
pattern(r'import/$', self.import_view, 'import'),
]
return url_patterns + super(RedirectAdmin, self).get_urls()
def get_form(self, request, obj=None, **kwargs):
form = super(RedirectAdmin, self).get_form(request, obj=None, **kwargs)
site_field = form.base_fields['site']
# the add and change links don't work anyway with admin.VERTICAL radio
# fields
site_field.widget.can_add_related = False
site_field.widget.can_change_related = False
# if there is only one site, select it by default
if site_field.queryset.all().count() == 1:
site_field.initial = [site_field.queryset.get(), ]
return form
def export_view(self, request):
dataset = Dataset(headers=self.export_headers)
filename = timezone.now().date().strftime(self.export_filename)
redirects = self.get_queryset(request).prefetch_related('translations')
for r in redirects:
rows = []
for translation in r.translations.all():
rows.append([
r.site.domain,
r.old_path,
translation.new_path,
translation.language_code,
])
dataset.extend(rows)
response = HttpResponse(dataset.csv, content_type='text/csv; charset=utf-8')
response['Content-Disposition'] = 'attachment; filename="{0}"'.format(filename)
return response
def import_view(self, request):
form = RedirectsImportForm(
data=request.POST or None,
files=request.FILES or None,
)
opts = self.model._meta
if form.is_valid():
url_name = "%s_%s_%s" % (self.opts.app_label, self.opts.model_name, 'changelist')
success_url = 'admin:{}'.format(url_name)
form.do_import()
self.message_user(request, _('Redirects imported successfully.'))
return redirect(success_url)
context = {
'adminform': form,
'has_change_permission': True,
'media': self.media + form.media,
'opts': opts,
'root_path': reverse('admin:index'),
'current_app': self.admin_site.name,
'app_label': opts.app_label,
'title': ugettext('Import redirects'),
'original': ugettext('Import redirects'),
'errors': form.errors,
}
return render(request, 'admin/aldryn_redirects/redirect/import_form.html', context)
class StaticRedirectInboundRouteQueryParamInline(admin.TabularInline):
model = StaticRedirectInboundRouteQueryParam
verbose_name = _('Query Param')
verbose_name_plural = _('Query Params')
extra = 1
class StaticRedirectAdmin(DeletionMixin, admin.ModelAdmin):
inlines = [StaticRedirectInboundRouteQueryParamInline]
filter_horizontal = ('sites',)
list_filter = ('sites',)
list_display = ('inbound_route', 'outbound_route')
search_fields = list_display
# Custom attributes
export_filename = 'static-redirects-%Y-%m-%d.csv'
export_headers = ['domain', 'inbound_route', 'outbound_route']
def get_urls(self):
from django.conf.urls import url
def pattern(regex, fn, name):
args = [regex, self.admin_site.admin_view(fn)]
url_name = "%s_%s_%s" % (self.opts.app_label, self.opts.model_name, name)
return url(*args, name=url_name)
url_patterns = [
pattern(r'export/$', self.export_view, 'export'),
pattern(r'import/$', self.import_view, 'import'),
]
return url_patterns + super(StaticRedirectAdmin, self).get_urls()
def get_form(self, request, obj=None, **kwargs):
form = super(StaticRedirectAdmin, self).get_form(request, obj=None, **kwargs)
sites_field = form.base_fields['sites']
# the add and change links don't work anyway with admin.VERTICAL radio
# fields
sites_field.widget.can_add_related = False
sites_field.widget.can_change_related = False
# if there is only one site, select it by default
if sites_field.queryset.all().count() == 1:
sites_field.initial = [sites_field.queryset.get(), ]
return form
def export_view(self, request):
dataset = Dataset(headers=self.export_headers)
filename = timezone.now().date().strftime(self.export_filename)
for r in self.get_queryset(request):
rows = []
for site in r.sites.all():
rows.append([
site.domain,
r.get_full_inbound_route(),
r.outbound_route,
])
dataset.extend(rows)
response = HttpResponse(dataset.csv, content_type='text/csv; charset=utf-8')
response['Content-Disposition'] = 'attachment; filename="{0}"'.format(filename)
return response
def import_view(self, request):
form = StaticRedirectsImportForm(
data=request.POST or None,
files=request.FILES or None,
)
opts = self.model._meta
if form.is_valid():
url_name = "%s_%s_%s" % (self.opts.app_label, self.opts.model_name, 'changelist')
success_url = 'admin:{}'.format(url_name)
form.do_import()
self.message_user(request, _('Redirects imported successfully.'))
return redirect(success_url)
context = {
'adminform': form,
'has_change_permission': True,
'media': self.media + form.media,
'opts': opts,
'root_path': reverse('admin:index'),
'current_app': self.admin_site.name,
'app_label': opts.app_label,
'title': ugettext('Import redirects'),
'original': ugettext('Import redirects'),
'errors': form.errors,
}
return render(request, 'admin/aldryn_redirects/staticredirect/import_form.html', context)
admin.site.register(Redirect, RedirectAdmin)
admin.site.register(StaticRedirect, StaticRedirectAdmin)
| bsd-3-clause | 7,284,398,277,118,702,000 | 37.797297 | 119 | 0.615929 | false | 4.030416 | false | false | false |
ai-se/XTREE | src/Planners/XTREE/smote.py | 1 | 3196 | #! /Users/rkrsn/anaconda/bin/python
from pdb import set_trace
from os import environ, getcwd
from os import walk
from os.path import expanduser
from pdb import set_trace
import sys
# Update PYTHONPATH
HOME = expanduser('~')
axe = HOME + '/git/axe/axe/' # AXE
pystat = HOME + '/git/pystats/' # PySTAT
cwd = getcwd() # Current Directory
sys.path.extend([axe, pystat, cwd])
from scipy.spatial.distance import euclidean
from random import choice, seed as rseed, uniform as rand
import pandas as pd
from tools.axe.table import *
def SMOTE(data=None, k=5, atleast=100, atmost=100, bugIndx=2, resample=False):
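  # SMOTE-style rebalancing sketch: classes with fewer than `atleast` rows get
  # synthetic rows interpolated between each row and one of its k nearest
  # neighbours, while classes with more than `atmost` rows are down-sampled to
  # `atmost` random picks; `bugIndx` says which cell, counted from the end of a
  # row, holds the class label.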
def Bugs(tbl):
cells = [i.cells[-bugIndx] for i in tbl._rows]
return cells
def minority(data):
unique = list(set(sorted(Bugs(data))))
counts = len(unique) * [0]
# set_trace()
for n in xrange(len(unique)):
for d in Bugs(data):
if unique[n] == d:
counts[n] += 1
return unique, counts
def knn(one, two):
pdistVect = []
# set_trace()
for ind, n in enumerate(two):
pdistVect.append([ind, euclidean(one.cells[:-1], n.cells[:-1])])
indices = sorted(pdistVect, key=lambda F: F[1])
return [two[n[0]] for n in indices]
def extrapolate(one, two):
new = one
# set_trace()
if bugIndx == 2:
new.cells[3:-1] = [max(min(a, b),
min(min(a, b) + rand() * (abs(a - b)),
max(a, b))) for a, b in zip(one.cells[3:-1],
two.cells[3:-1])]
new.cells[-2] = int(new.cells[-2])
else:
new.cells[3:] = [min(a, b) + rand() * (abs(a - b)) for
a, b in zip(one.cells[3:], two.cells[3:])]
new.cells[-1] = int(new.cells[-1])
return new
def populate(data):
newData = []
# reps = (len(data) - atleast)
for _ in xrange(atleast):
for one in data:
neigh = knn(one, data)[1:k + 1]
        # If you're thinking the following try/except statement is bad coding
        # etiquette, I agree; it just guards against a row having no neighbours.
try:
two = choice(neigh)
except IndexError:
two = one
newData.append(extrapolate(one, two))
# data.extend(newData)
return newData
def depopulate(data):
if resample:
newer = []
for _ in xrange(atmost):
orig = choice(data)
newer.append(extrapolate(orig, knn(orig, data)[1]))
return newer
else:
return [choice(data) for _ in xrange(atmost)]
newCells = []
rseed(1)
unique, counts = minority(data)
rows = data._rows
for u, n in zip(unique, counts):
if n < atleast:
newCells.extend(populate([r for r in rows if r.cells[-2] == u]))
if n > atmost:
newCells.extend(depopulate([r for r in rows if r.cells[-2] == u]))
else:
newCells.extend([r for r in rows if r.cells[-2] == u])
return clone(data, rows=[k.cells for k in newCells])
def test_smote():
dir = '../Data/camel/camel-1.6.csv'
Tbl = createTbl([dir], _smote=False)
newTbl = createTbl([dir], _smote=True)
print(len(Tbl._rows), len(newTbl._rows))
# for r in newTbl._rows:
# print r.cells
if __name__ == '__main__':
test_smote()
| mit | -2,068,732,287,555,262,000 | 27.792793 | 78 | 0.572904 | false | 3.102913 | false | false | false |
MatthewWilkes/mw4068-packaging | src/melange/src/soc/models/timeline.py | 1 | 1544 | #!/usr/bin/env python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the Timeline Model.
"""
__authors__ = [
'"Sverre Rabbelier" <[email protected]>',
]
from google.appengine.ext import db
from django.utils.translation import ugettext
from soc.models import linkable
class Timeline(linkable.Linkable):
"""The Timeline Model, representing the timeline for a Program.
"""
program_start = db.DateTimeProperty(
verbose_name=ugettext('Program Start date'))
program_end = db.DateTimeProperty(
verbose_name=ugettext('Program End date'))
org_signup_start = db.DateTimeProperty(
verbose_name=ugettext('Organization Signup Start date'))
org_signup_end = db.DateTimeProperty(
verbose_name=ugettext('Organization Signup End date'))
student_signup_start = db.DateTimeProperty(
verbose_name=ugettext('Student Signup Start date'))
student_signup_end = db.DateTimeProperty(
verbose_name=ugettext('Student Signup End date'))
| apache-2.0 | 4,690,625,934,440,829,000 | 28.692308 | 74 | 0.738342 | false | 3.879397 | false | false | false |
Nydareld/IaGameServer | Server/PlayerThread.py | 1 | 4282 | from Server.Game import *
from threading import Thread
from CodIa.tuto.models import User
from CodIa.tuto.app import db
import threading
import time
import random
class PlayerThread(Thread):
def __init__(self, GameThread, username, ia):
Thread.__init__(self)
self.GameThread = GameThread
self.username = username
GameThread.barrierTours._parties += 1
self.ia = ia
self.joueur = Player(ia,username,GameThread.game.gamesize)
#GameThread.game.joueurs[username]=Player(ia,username,GameThread.game.gamesize)
GameThread.joueursAAdd.append(self.joueur)
GameThread.nbth += 1
def run(self):
while True:
            # wait for the start of the turn
# print("Barriere debut de tours "+str(threading.current_thread().name))
# print(self.GameThread.barrierTours.parties)
self.GameThread.barrierTours.wait()
            # run this player's AI code
self.executeIa()
#print(self.GameThread.barrierEtape.parties)
self.GameThread.barrierEtape.wait()
self.calculePos()
self.GameThread.barrierEtape.wait()
agraille = self.join()
#print("avant acquire")
self.GameThread.barrierEtape.wait()
self.GameThread.lockmanger.acquire()
self.GameThread.aManger.append(agraille)
#print("pendant")
self.GameThread.lockmanger.release()
#print("après release")
self.GameThread.barrierManger.wait()
if self.joueur.poidTotal<=0 and not self.joueur.end:
self.joueur.end = True
print("\033[91m Le Joueur "+self.joueur.username +" à perdu \033[0m")
user = User.query.filter_by(pseudo=self.joueur.username).first()
if user is not None:
# print("\033[91m Zbra \033[0m")
user.score += self.joueur.score
db.session.commit()
# time.sleep(1/60)
# self.GameThread.nbth-=1
# self.GameThread.barrierTours._parties -= 1
def executeIa(self):
pass
def calculePos(self):
# print("\033[91m caca \033[0m")
# print(str(self.joueur.spheres[0].normeVitesse()) +" "+ str(self.joueur.spheres[0].normeVitesseMax()))
res=0
for sphere in self.joueur.spheres:
sphere.vectVitesse = sphere.vitesseNextTick()
if sphere.normeVitesse() > sphere.normeVitesseMax():
# print("\033[91m caca \033[0m")
sphere.vectVitesse[0] *= 0.9
sphere.vectVitesse[1] *= 0.9
# else :
# print("\033[92m non caca \033[0m")
sphere.vectPos = sphere.posNextTick()
rand = random.randint(1,300)
if sphere.taille > 50000 and rand==1:
sphere.split(self.joueur)
somme = 0
#print("=======================================================")
for joueur in self.GameThread.game.joueurs.values():
somme += joueur.poidTotal
#print("somme2"+str(somme))
#print("taille sphere max: "+str((sphere.taille)))
#pass
self.joueur.updateScore()
def join(self):
try:
listjoueur = dict()
for sphere in self.joueur.spheres:
for joueur2 in self.GameThread.game.joueurs.values():
for sphere2 in joueur2.spheres:
res = sphere.join(sphere2,joueur2)
if(res != None):
# if(not (listjoueur[res[0].username] in locals)):
# listjoueur[res[0].username] = []
try:
listjoueur[res[0].username].append(res[1])
except KeyError:
listjoueur[res[0].username] = []
listjoueur[res[0].username].append(res[1])
except RuntimeError:
print("\033[91m Nb de Thread :"+str(self.GameThread.barrierManger._parties)+", "+str(self.GameThread.nbth)+" \033[0m")
return listjoueur
| gpl-3.0 | 7,055,425,905,077,996,000 | 35.57265 | 131 | 0.535172 | false | 3.644804 | false | false | false |
atmega/ipkg-utils | ipkg.py | 1 | 11947 | #!/usr/bin/env python
# Copyright (C) 2001 Alexander S. Guy <[email protected]>
# Andern Research Labs
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA. */
#
# Copyright 2001, Russell Nelson <[email protected]>
# Added reading in of packages.
# Added missing package information fields.
# Changed render_control() to __repr__().
#
# Current Issues:
# The API doesn't validate package information fields. It should be
# throwing exceptions in the right places.
# Executions of tar could silently fail.
# Executions of tar *do* fail, and loudly, because you have to specify a full filename,
# and tar complains if any files are missing, and the ipkg spec doesn't require
# people to say "./control.tar.gz" or "./control" when they package files.
# It would be much better to require ./control or disallow ./control (either)
# rather than letting people pick. Some freedoms aren't worth their cost.
import tempfile
import os
import sys
import glob
import md5
import re
import string
import commands
from stat import ST_SIZE
class Package:
"""A class for creating objects to manipulate (e.g. create) ipkg
packages."""
def __init__(self, fn=None):
self.package = None
self.version = None
self.architecture = None
self.maintainer = None
self.source = None
self.description = None
self.depends = None
self.provides = None
self.replaces = None
self.conflicts = None
self.recommends = None
self.suggests = None
self.section = None
self.filename_header = None
self.file_list = []
self.md5 = None
self.size = None
self.installed_size = None
self.filename = None
self.isdeb = 0
if fn:
# see if it is deb format
f = open(fn, "r")
magic = f.read(4)
f.close()
if (magic == "!<ar"):
self.isdeb = 1
# compute the MD5.
f = open(fn, "r")
sum = md5.new()
while 1:
data = f.read(1024)
if not data: break
sum.update(data)
f.close()
if sys.version[:1] > '2':
# when using Python 2.0 or newer
self.md5 = sum.hexdigest()
else:
self.md5 = string.join(map((lambda x:"%02x" % ord(x)),sum.digest()),'')
stat = os.stat(fn)
self.size = stat[ST_SIZE]
self.filename = os.path.basename(fn)
## sys.stderr.write(" extracting control.tar.gz from %s\n"% (fn,))
if self.isdeb:
control = os.popen("ar p "+fn+" control.tar.gz | tar xfzO - --wildcards '*control'","r")
else:
control = os.popen("tar xfzO "+fn+" --wildcards '*control.tar.gz' | tar xfzO - --wildcards '*control'","r")
line = control.readline()
while 1:
if not line: break
line = string.rstrip(line)
lineparts = re.match(r'([\w-]*?):\s*(.*)', line)
if lineparts:
name = string.lower(lineparts.group(1))
value = lineparts.group(2)
while 1:
line = control.readline()
if not line: break
if line[0] != ' ': break
line = string.rstrip(line)
value = value + '\n' + line
# don't allow package to override its own filename
if name == "filename":
self.filename_header = value
else:
if self.__dict__.has_key(name):
self.__dict__[name] = value
else:
line = control.readline()
control.close()
if self.isdeb:
data = os.popen("ar p "+fn+" data.tar.gz | tar tfz -","r")
else:
data = os.popen("tar xfzO "+fn+" --wildcards '*data.tar.gz' | tar tfz -","r")
while 1:
line = data.readline()
if not line: break
self.file_list.append(string.rstrip(line))
data.close()
self.scratch_dir = None
self.file_dir = None
self.meta_dir = None
def read_control(self, control):
import os
line = control.readline()
while 1:
if not line: break
line = string.rstrip(line)
lineparts = re.match(r'([\w-]*?):\s*(.*)', line)
if lineparts:
name = string.lower(lineparts.group(1))
value = lineparts.group(2)
while 1:
line = control.readline()
if not line: break
if line[0] != ' ': break
value = value + '\n' + line
if name == 'size':
self.size = int(value)
elif self.__dict__.has_key(name):
self.__dict__[name] = value
if line[0] == '\n':
return # consumes one blank line at end of package descriptoin
else:
line = control.readline()
pass
return
def _setup_scratch_area(self):
self.scratch_dir = "%s/%sipkg" % (tempfile.gettempdir(),
tempfile.gettempprefix())
self.file_dir = "%s/files" % (self.scratch_dir)
self.meta_dir = "%s/meta" % (self.scratch_dir)
os.mkdir(self.scratch_dir)
os.mkdir(self.file_dir)
os.mkdir(self.meta_dir)
def set_package(self, package):
self.package = package
def get_package(self):
return self.package
def set_version(self, version):
self.version = version
def get_version(self):
return self.version
def set_architecture(self, architecture):
self.architecture = architecture
def get_architecture(self):
return self.architecture
def set_maintainer(self, maintainer):
self.maintainer = maintainer
def get_maintainer(self):
return self.maintainer
def set_source(self, source):
self.source = source
def get_source(self):
return self.source
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def set_depends(self, depends):
self.depends = depends
def get_depends(self, depends):
return self.depends
def set_provides(self, provides):
self.provides = provides
def get_provides(self, provides):
return self.provides
def set_replaces(self, replaces):
self.replaces = replaces
def get_replaces(self, replaces):
return self.replaces
def set_conflicts(self, conflicts):
self.conflicts = conflicts
def get_conflicts(self, conflicts):
return self.conflicts
def set_suggests(self, suggests):
self.suggests = suggests
def get_suggests(self, suggests):
return self.suggests
def set_section(self, section):
self.section = section
def get_section(self, section):
return self.section
def get_file_list(self):
return self.file_list
def write_package(self, dirname):
buf = self.render_control()
file = open("%s/control" % self.meta_dir, 'w')
file.write(buf)
self._setup_scratch_area()
cmd = "cd %s ; tar cvfz %s/control.tar.gz control" % (self.meta_dir,
self.scratch_dir)
cmd_out, cmd_in, cmd_err = os.popen3(cmd)
while cmd_err.readline() != "":
pass
cmd_out.close()
cmd_in.close()
cmd_err.close()
bits = "control.tar.gz"
if self.file_list:
cmd = "cd %s ; tar cvfz %s/data.tar.gz" % (self.file_dir,
self.scratch_dir)
cmd_out, cmd_in, cmd_err = os.popen3(cmd)
while cmd_err.readline() != "":
pass
cmd_out.close()
cmd_in.close()
cmd_err.close()
bits = bits + " data.tar.gz"
file = "%s_%s_%s.ipk" % (self.package, self.version, self.architecture)
cmd = "cd %s ; tar cvfz %s/%s %s" % (self.scratch_dir,
dirname,
file,
bits)
cmd_out, cmd_in, cmd_err = os.popen3(cmd)
while cmd_err.readline() != "":
pass
cmd_out.close()
cmd_in.close()
cmd_err.close()
def __repr__(self):
out = ""
# XXX - Some checks need to be made, and some exceptions
# need to be thrown. -- a7r
if self.package: out = out + "Package: %s\n" % (self.package)
if self.version: out = out + "Version: %s\n" % (self.version)
if self.depends: out = out + "Depends: %s\n" % (self.depends)
if self.provides: out = out + "Provides: %s\n" % (self.provides)
if self.replaces: out = out + "Replaces: %s\n" % (self.replaces)
if self.conflicts: out = out + "Conflicts: %s\n" % (self.conflicts)
if self.suggests: out = out + "Suggests: %s\n" % (self.suggests)
if self.recommends: out = out + "Recommends: %s\n" % (self.recommends)
if self.section: out = out + "Section: %s\n" % (self.section)
if self.architecture: out = out + "Architecture: %s\n" % (self.architecture)
if self.maintainer: out = out + "Maintainer: %s\n" % (self.maintainer)
if self.md5: out = out + "MD5Sum: %s\n" % (self.md5)
if self.size: out = out + "Size: %d\n" % int(self.size)
if self.installed_size: out = out + "InstalledSize: %d\n" % int(self.installed_size)
if self.filename: out = out + "Filename: %s\n" % (self.filename)
if self.source: out = out + "Source: %s\n" % (self.source)
if self.description: out = out + "Description: %s\n" % (self.description)
out = out + "\n"
return out
def __del__(self):
# XXX - Why is the `os' module being yanked out before Package objects
# are being destroyed? -- a7r
pass
class Packages:
"""A currently unimplemented wrapper around the ipkg utility."""
def __init__(self):
self.packages = {}
return
def add_package(self, pkg):
package = pkg.package
arch = pkg.architecture
name = ("%s:%s" % (package, arch))
if (not self.packages.has_key(name)):
self.packages[name] = pkg
(s, outtext) = commands.getstatusoutput("ipkg-compare-versions %s '>' %s" % (pkg.version, self.packages[name].version))
if (s == 0):
self.packages[name] = pkg
return 0
else:
return 1
def read_packages_file(self, fn):
f = open(fn, "r")
while 1:
pkg = Package()
pkg.read_control(f)
if pkg.get_package():
self.add_package(pkg)
else:
break
f.close()
return
def write_packages_file(self, fn):
f = open(fn, "w")
names = self.packages.keys()
names.sort()
for name in names:
f.write(self.packages[name].__repr__())
return
def keys(self):
return self.packages.keys()
def __getitem__(self, key):
return self.packages[key]
if __name__ == "__main__":
package = Package()
package.set_package("FooBar")
package.set_version("0.1-fam1")
package.set_architecture("arm")
package.set_maintainer("Testing <[email protected]>")
package.set_depends("libc")
package.set_description("A test of the APIs.")
print "<"
    sys.stdout.write(str(package))  # write() needs a string; str() falls back to Package.__repr__
print ">"
package.write_package("/tmp")
| mit | -7,228,900,134,880,194,000 | 29.633333 | 131 | 0.572612 | false | 3.531481 | false | false | false |
Nanguage/BioInfoCollections | others/bedgraph2bed.py | 1 | 1815 | import pandas as pd
import click
def skip_lines(path):
n = 0
with open(path) as f:
for line in f:
if line.startswith("track"):
n += 1
else:
break
return n
def read_bed(path):
n_skip = skip_lines(path)
df = pd.read_table(path, sep="\t", header=None, skiprows=n_skip)
base_cols = ['chr', 'start', 'end']
n_col = len(df.columns)
if n_col == 4:
columns = base_cols + ['value']
else:
columns = base_cols
if n_col >= 6:
columns += ['name', 'score', 'strand']
if n_col >= 9:
columns += ['thickStart', 'thickEnd', 'itemRgb']
if n_col == 12:
columns += ['blockCount', 'blockSizes', 'blockStarts']
df.columns = columns
return df
def region_str(df):
ser = df.chr + '_' + df.start.map(str) + '_' + df.end.map(str)
return ser
@click.command()
@click.argument("bedgraph")
@click.argument("output")
@click.option("--ref-bed", "-r",
help="reference BED file.")
def bedgraph2bed(bedgraph, output, ref_bed):
"""
Expand bedGraph to BED.
    By default, expand to BED6; if a reference BED is given,
    substitute its score column with the bedGraph value.
"""
bg = read_bed(bedgraph)
if ref_bed:
ref_bed = read_bed(ref_bed)
outbed = ref_bed
bg.index = region_str(bg)
outbed.index = region_str(outbed)
outbed = outbed.loc[bg.index]
outbed.score = bg.value
else:
outbed = bg
outbed['name'] = '.'
outbed['score'] = bg.value
outbed['strand'] = '.'
outbed = outbed[['chr', 'start', 'end', 'name', 'score', 'strand']]
outbed.to_csv(output, header=False, sep="\t", index=False)
if __name__ == "__main__":
eval("bedgraph2bed()")
| gpl-3.0 | -4,500,874,446,086,020,600 | 24.928571 | 75 | 0.539394 | false | 3.282098 | false | false | false |
zhy0216/random-read | utils/image_crawler.py | 1 | 2041 | import os.path
import shutil
from urlparse import urlparse, urljoin
import base64
import requests
from bs4 import BeautifulSoup
from blue.settings import DOWNLOAD_IMAGE_FOLDER, IMAGE_PREFIX
''' download the image from the article '''
IMAGE_DOWNLOAD_FOLDER = DOWNLOAD_IMAGE_FOLDER
def get_absolute_url(article_url, image_url):
urlcomponent = urlparse(article_url)
host = urlcomponent.netloc
image_url = image_url.strip()
if image_url.startswith("http://") \
or image_url.startswith("https://"):
return image_url
if image_url.startswith("//"):
return "http:" + image_url
if image_url.startswith("/"):
return host + image_url
return urljoin(article_url, image_url)
def get_name(url):
name = base64.b64encode(url)
dot_index = url.rfind('.')
if dot_index < 0:
return None
question_mark_index = url.rfind('?')
if(question_mark_index > dot_index):
return name + url[dot_index:question_mark_index]
return name + url[dot_index:]
## this is export to use
def change_image(article):
soup = BeautifulSoup(article.content)
## ''.join(soup.body.contents)
for img in soup.find_all('img'):
src = img.get('src', None)
if src:
absolute_url = get_absolute_url(article.original_url, src)
name = get_name(absolute_url)
if name is None:
continue
img['src'] = IMAGE_PREFIX + name
# download image
            # it's better to use another worker
download_image(absolute_url, name)
    ## catch the images that can be caught
article.content = ''.join(map(str, soup.body.contents))
article.save()
def download_image(image_url, new_name):
filename = IMAGE_DOWNLOAD_FOLDER + new_name
if os.path.isfile(filename):
return None
response = requests.get(image_url, stream=True)
with open(filename, 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
del response
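# Minimal usage sketch (assumes an `article` object exposing `content`,
# `original_url` and `save()`, e.g. a Django model instance; the lookup
# below is hypothetical):
#   article = Article.objects.get(pk=1)
#   change_image(article)   # rewrites <img> src attributes and caches the files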
| mit | 6,987,141,486,954,231,000 | 24.835443 | 70 | 0.627634 | false | 3.843691 | false | false | false |
DrDos0016/z2 | museum_site/migrations/0011_auto_20170112_0208.py | 1 | 2385 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-12 02:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0010_auto_20160826_2152'),
]
operations = [
migrations.AlterModelOptions(
name='article',
options={'ordering': ['title']},
),
migrations.AlterModelOptions(
name='detail',
options={'ordering': ['detail']},
),
migrations.AlterModelOptions(
name='file',
options={'ordering': ['title']},
),
migrations.AlterModelOptions(
name='review',
options={'ordering': ['id']},
),
migrations.AddField(
model_name='file',
name='parent',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='file',
name='articles',
field=models.ManyToManyField(blank=True, default=None, to='museum_site.Article'),
),
migrations.AlterField(
model_name='file',
name='company',
field=models.CharField(blank=True, default='', max_length=80, null=True),
),
migrations.AlterField(
model_name='file',
name='description',
field=models.TextField(blank=True, default='', null=True),
),
migrations.AlterField(
model_name='file',
name='details',
field=models.ManyToManyField(blank=True, default=None, to='museum_site.Detail'),
),
migrations.AlterField(
model_name='file',
name='release_date',
field=models.DateField(blank=True, default=None, null=True),
),
migrations.AlterField(
model_name='review',
name='author',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='review',
name='email',
field=models.EmailField(blank=True, max_length=254, null=True),
),
migrations.AlterField(
model_name='review',
name='ip',
field=models.GenericIPAddressField(blank=True, null=True),
),
]
| mit | -992,943,723,689,504,300 | 30.381579 | 93 | 0.534172 | false | 4.525617 | false | false | false |
jaeilepp/eggie | mne/coreg.py | 1 | 40760 | """Coregistration between different coordinate frames"""
# Authors: Christian Brodbeck <[email protected]>
#
# License: BSD (3-clause)
from .externals.six.moves import configparser
import fnmatch
from glob import glob, iglob
import os
import re
import shutil
from warnings import warn
import numpy as np
from numpy import dot
from scipy.optimize import leastsq
from scipy.spatial.distance import cdist
from scipy.linalg import norm
from .io.meas_info import read_fiducials, write_fiducials
from .label import read_label, Label
from .source_space import (add_source_space_distances, read_source_spaces,
write_source_spaces)
from .surface import (read_surface, write_surface, read_bem_surfaces,
write_bem_surface)
from .transforms import rotation, rotation3d, scaling, translation
from .utils import get_config, get_subjects_dir, logger, pformat
from functools import reduce
from .externals.six.moves import zip
# some path templates
trans_fname = os.path.join('{raw_dir}', '{subject}-trans.fif')
subject_dirname = os.path.join('{subjects_dir}', '{subject}')
bem_dirname = os.path.join(subject_dirname, 'bem')
surf_dirname = os.path.join(subject_dirname, 'surf')
bem_fname = os.path.join(bem_dirname, "{subject}-{name}.fif")
head_bem_fname = pformat(bem_fname, name='head')
fid_fname = pformat(bem_fname, name='fiducials')
fid_fname_general = os.path.join(bem_dirname, "{head}-fiducials.fif")
src_fname = os.path.join(bem_dirname, '{subject}-{spacing}-src.fif')
def create_default_subject(mne_root=None, fs_home=None, update=False,
subjects_dir=None):
"""Create an average brain subject for subjects without structural MRI
Create a copy of fsaverage from the Freesurfer directory in subjects_dir
and add auxiliary files from the mne package.
Parameters
----------
mne_root : None | str
The mne root directory (only needed if MNE_ROOT is not specified as
environment variable).
fs_home : None | str
The freesurfer home directory (only needed if FREESURFER_HOME is not
specified as environment variable).
update : bool
In cases where a copy of the fsaverage brain already exists in the
subjects_dir, this option allows to only copy files that don't already
exist in the fsaverage directory.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
(os.environ['SUBJECTS_DIR']) as destination for the new subject.
Notes
-----
When no structural MRI is available for a subject, an average brain can be
substituted. Freesurfer comes with such an average brain model, and MNE
comes with some auxiliary files which make coregistration easier.
:py:func:`create_default_subject` copies the relevant files from Freesurfer
into the current subjects_dir, and also adds the auxiliary files provided
by MNE.
The files provided by MNE are listed below and can be found under
``share/mne/mne_analyze/fsaverage`` in the MNE directory (see MNE manual
section 7.19 Working with the average brain):
fsaverage_head.fif:
The approximate head surface triangulation for fsaverage.
fsaverage_inner_skull-bem.fif:
The approximate inner skull surface for fsaverage.
fsaverage-fiducials.fif:
The locations of the fiducial points (LPA, RPA, and nasion).
fsaverage-trans.fif:
Contains a default MEG-MRI coordinate transformation suitable for
fsaverage.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if fs_home is None:
fs_home = get_config('FREESURFER_HOME', fs_home)
if fs_home is None:
err = ("FREESURFER_HOME environment variable not found. Please "
"specify the fs_home parameter in your call to "
"create_default_subject().")
raise ValueError(err)
if mne_root is None:
mne_root = get_config('MNE_ROOT', mne_root)
if mne_root is None:
err = ("MNE_ROOT environment variable not found. Please "
"specify the mne_root parameter in your call to "
"create_default_subject().")
raise ValueError(err)
# make sure freesurfer files exist
fs_src = os.path.join(fs_home, 'subjects', 'fsaverage')
if not os.path.exists(fs_src):
err = ('fsaverage not found at %r. Is fs_home specified '
'correctly?' % fs_src)
raise IOError(err)
for name in ('label', 'mri', 'surf'):
dirname = os.path.join(fs_src, name)
if not os.path.isdir(dirname):
err = ("Freesurfer fsaverage seems to be incomplete: No directory "
"named %s found in %s" % (name, fs_src))
raise IOError(err)
# make sure destination does not already exist
dest = os.path.join(subjects_dir, 'fsaverage')
if dest == fs_src:
err = ("Your subjects_dir points to the freesurfer subjects_dir (%r). "
"The default subject can not be created in the freesurfer "
"installation directory; please specify a different "
"subjects_dir." % subjects_dir)
raise IOError(err)
elif (not update) and os.path.exists(dest):
err = ("Can not create fsaverage because %r already exists in "
"subjects_dir %r. Delete or rename the existing fsaverage "
"subject folder." % ('fsaverage', subjects_dir))
raise IOError(err)
# make sure mne files exist
mne_fname = os.path.join(mne_root, 'share', 'mne', 'mne_analyze',
'fsaverage', 'fsaverage-%s.fif')
mne_files = ('fiducials', 'head', 'inner_skull-bem', 'trans')
for name in mne_files:
fname = mne_fname % name
if not os.path.isfile(fname):
err = ("MNE fsaverage incomplete: %s file not found at "
"%s" % (name, fname))
raise IOError(err)
# copy fsaverage from freesurfer
logger.info("Copying fsaverage subject from freesurfer directory...")
if (not update) or not os.path.exists(dest):
shutil.copytree(fs_src, dest)
# add files from mne
dest_bem = os.path.join(dest, 'bem')
if not os.path.exists(dest_bem):
os.mkdir(dest_bem)
logger.info("Copying auxiliary fsaverage files from mne directory...")
dest_fname = os.path.join(dest_bem, 'fsaverage-%s.fif')
for name in mne_files:
if not os.path.exists(dest_fname % name):
shutil.copy(mne_fname % name, dest_bem)
def _decimate_points(pts, res=10):
"""Decimate the number of points using a voxel grid
Create a voxel grid with a specified resolution and retain at most one
point per voxel. For each voxel, the point closest to its center is
retained.
Parameters
----------
pts : array, shape = (n_points, 3)
The points making up the head shape.
res : scalar
The resolution of the voxel space (side length of each voxel).
Returns
-------
pts : array, shape = (n_points, 3)
The decimated points.
"""
pts = np.asarray(pts)
# find the bin edges for the voxel space
xmin, ymin, zmin = pts.min(0) - res / 2.
xmax, ymax, zmax = pts.max(0) + res
xax = np.arange(xmin, xmax, res)
yax = np.arange(ymin, ymax, res)
zax = np.arange(zmin, zmax, res)
# find voxels containing one or more point
H, _ = np.histogramdd(pts, bins=(xax, yax, zax), normed=False)
# for each voxel, select one point
X, Y, Z = pts.T
out = np.empty((np.sum(H > 0), 3))
for i, (xbin, ybin, zbin) in enumerate(zip(*np.nonzero(H))):
x = xax[xbin]
y = yax[ybin]
z = zax[zbin]
xi = np.logical_and(X >= x, X < x + res)
yi = np.logical_and(Y >= y, Y < y + res)
zi = np.logical_and(Z >= z, Z < z + res)
idx = np.logical_and(zi, np.logical_and(yi, xi))
ipts = pts[idx]
mid = np.array([x, y, z]) + res / 2.
dist = cdist(ipts, [mid])
i_min = np.argmin(dist)
ipt = ipts[i_min]
out[i] = ipt
return out
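# Minimal usage sketch (synthetic head-shape points, units in mm):
#   >>> rand_pts = np.random.uniform(0, 100, (2000, 3))
#   >>> dec_pts = _decimate_points(rand_pts, res=10)
#   >>> dec_pts.shape[1]
#   3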
def _trans_from_params(param_info, params):
"""Convert transformation parameters into a transformation matrix
Parameters
----------
param_info : tuple, len = 3
Tuple describing the parameters in x (do_translate, do_rotate,
do_scale).
params : tuple
The transformation parameters.
Returns
-------
trans : array, shape = (4, 4)
Transformation matrix.
"""
do_rotate, do_translate, do_scale = param_info
i = 0
trans = []
if do_rotate:
x, y, z = params[:3]
trans.append(rotation(x, y, z))
i += 3
if do_translate:
x, y, z = params[i:i + 3]
trans.insert(0, translation(x, y, z))
i += 3
if do_scale == 1:
s = params[i]
trans.append(scaling(s, s, s))
elif do_scale == 3:
x, y, z = params[i:i + 3]
trans.append(scaling(x, y, z))
trans = reduce(dot, trans)
return trans
def fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True,
scale=False, tol=None, x0=None, out='trans'):
"""Find a transform that minimizes the squared distance between two
matching sets of points.
Uses :func:`scipy.optimize.leastsq` to find a transformation involving
a combination of rotation, translation, and scaling (in that order).
Parameters
----------
src_pts : array, shape = (n, 3)
Points to which the transform should be applied.
tgt_pts : array, shape = (n, 3)
Points to which src_pts should be fitted. Each point in tgt_pts should
correspond to the point in src_pts with the same index.
rotate : bool
Allow rotation of the ``src_pts``.
translate : bool
Allow translation of the ``src_pts``.
scale : bool
Number of scaling parameters. With False, points are not scaled. With
True, points are scaled by the same factor along all axes.
tol : scalar | None
The error tolerance. If the distance between any of the matched points
exceeds this value in the solution, a RuntimeError is raised. With
None, no error check is performed.
x0 : None | tuple
Initial values for the fit parameters.
out : 'params' | 'trans'
In what format to return the estimate: 'params' returns a tuple with
the fit parameters; 'trans' returns a transformation matrix of shape
(4, 4).
Returns
-------
One of the following, depending on the ``out`` parameter:
trans : array, shape = (4, 4)
Transformation that, if applied to src_pts, minimizes the squared
distance to tgt_pts.
params : array, shape = (n_params, )
A single tuple containing the translation, rotation and scaling
parameters in that order.
"""
src_pts = np.atleast_2d(src_pts)
tgt_pts = np.atleast_2d(tgt_pts)
if src_pts.shape != tgt_pts.shape:
err = ("src_pts and tgt_pts must have same shape "
"(got {0}, {1})".format(src_pts.shape, tgt_pts.shape))
raise ValueError(err)
rotate = bool(rotate)
translate = bool(translate)
scale = int(scale)
if translate:
src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
param_info = (rotate, translate, scale)
if param_info == (True, False, 0):
def error(x):
rx, ry, rz = x
trans = rotation3d(rx, ry, rz)
est = dot(src_pts, trans.T)
return (tgt_pts - est).ravel()
if x0 is None:
x0 = (0, 0, 0)
elif param_info == (True, False, 1):
def error(x):
rx, ry, rz, s = x
trans = rotation3d(rx, ry, rz) * s
est = dot(src_pts, trans.T)
return (tgt_pts - est).ravel()
if x0 is None:
x0 = (0, 0, 0, 1)
elif param_info == (True, True, 0):
def error(x):
rx, ry, rz, tx, ty, tz = x
trans = dot(translation(tx, ty, tz), rotation(rx, ry, rz))
est = dot(src_pts, trans.T)
return (tgt_pts - est[:, :3]).ravel()
if x0 is None:
x0 = (0, 0, 0, 0, 0, 0)
elif param_info == (True, True, 1):
def error(x):
rx, ry, rz, tx, ty, tz, s = x
trans = reduce(dot, (translation(tx, ty, tz), rotation(rx, ry, rz),
scaling(s, s, s)))
est = dot(src_pts, trans.T)
return (tgt_pts - est[:, :3]).ravel()
if x0 is None:
x0 = (0, 0, 0, 0, 0, 0, 1)
else:
err = ("The specified parameter combination is not implemented: "
"rotate=%r, translate=%r, scale=%r" % param_info)
raise NotImplementedError(err)
x, _, _, _, _ = leastsq(error, x0, full_output=True)
# re-create the final transformation matrix
if (tol is not None) or (out == 'trans'):
trans = _trans_from_params(param_info, x)
# assess the error of the solution
if tol is not None:
if not translate:
src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
est_pts = dot(src_pts, trans.T)[:, :3]
err = np.sqrt(np.sum((est_pts - tgt_pts) ** 2, axis=1))
if np.any(err > tol):
raise RuntimeError("Error exceeds tolerance. Error = %r" % err)
if out == 'params':
return x
elif out == 'trans':
return trans
else:
err = ("Invalid out parameter: %r. Needs to be 'params' or "
"'trans'." % out)
raise ValueError(err)
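# Minimal usage sketch: recover a known rotation + translation from two
# matched point sets (all values are synthetic):
#   >>> src = np.random.uniform(-0.1, 0.1, (50, 3))
#   >>> ref = np.dot(translation(0.01, 0.02, 0.03), rotation(0.05, 0.1, 0.15))
#   >>> tgt = np.dot(np.hstack((src, np.ones((50, 1)))), ref.T)[:, :3]
#   >>> est = fit_matched_points(src, tgt, out='trans')
#   >>> np.allclose(est, ref, atol=1e-5)
#   True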
def get_ras_to_neuromag_trans(nasion, lpa, rpa):
"""Construct a transformation matrix to the MNE head coordinate system
Construct a transformation matrix from an arbitrary RAS coordinate system
to the MNE head coordinate system, in which the x axis passes through the
two preauricular points, and the y axis passes through the nasion and is
normal to the x axis. (see mne manual, pg. 97)
Parameters
----------
nasion : array_like, shape = (3,)
Nasion point coordinate.
lpa : array_like, shape = (3,)
Left peri-auricular point coordinate.
rpa : array_like, shape = (3,)
Right peri-auricular point coordinate.
Returns
-------
trans : numpy.array, shape = (4, 4)
Transformation matrix to MNE head space.
"""
# check input args
nasion = np.asarray(nasion)
lpa = np.asarray(lpa)
rpa = np.asarray(rpa)
for pt in (nasion, lpa, rpa):
if pt.ndim != 1 or len(pt) != 3:
err = ("Points have to be provided as one dimensional arrays of "
"length 3.")
raise ValueError(err)
right = rpa - lpa
right_unit = right / norm(right)
origin = lpa + np.dot(nasion - lpa, right_unit) * right_unit
anterior = nasion - origin
anterior_unit = anterior / norm(anterior)
superior_unit = np.cross(right_unit, anterior_unit)
x, y, z = -origin
origin_trans = translation(x, y, z)
trans_l = np.vstack((right_unit, anterior_unit, superior_unit, [0, 0, 0]))
trans_r = np.reshape([0, 0, 0, 1], (4, 1))
rot_trans = np.hstack((trans_l, trans_r))
trans = np.dot(rot_trans, origin_trans)
return trans
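# Minimal usage sketch (made-up fiducial coordinates, in meters):
#   >>> nasion, lpa, rpa = [0.0, 0.09, 0.0], [-0.07, 0.0, 0.0], [0.07, 0.0, 0.0]
#   >>> trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
#   >>> np.dot(trans, np.r_[nasion, 1.0])[:3]   # -> approximately [0, 0.09, 0]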
def _point_cloud_error(src_pts, tgt_pts):
"""Find the distance from each source point to its closest target point
Parameters
----------
src_pts : array, shape = (n, 3)
Source points.
tgt_pts : array, shape = (m, 3)
Target points.
Returns
-------
dist : array, shape = (n, )
For each point in ``src_pts``, the distance to the closest point in
``tgt_pts``.
"""
Y = cdist(src_pts, tgt_pts, 'euclidean')
dist = Y.min(axis=1)
return dist
def _point_cloud_error_balltree(src_pts, tgt_tree):
"""Find the distance from each source point to its closest target point
Uses sklearn.neighbors.BallTree for greater efficiency
Parameters
----------
src_pts : array, shape = (n, 3)
Source points.
tgt_tree : sklearn.neighbors.BallTree
BallTree of the target points.
Returns
-------
dist : array, shape = (n, )
For each point in ``src_pts``, the distance to the closest point in
``tgt_pts``.
"""
dist, _ = tgt_tree.query(src_pts)
return dist.ravel()
def fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=True,
scale=0, x0=None, leastsq_args={}, out='params'):
"""Find a transform that minimizes the squared distance from each source
point to its closest target point
Uses :func:`scipy.optimize.leastsq` to find a transformation involving
a combination of rotation, translation, and scaling (in that order).
Parameters
----------
src_pts : array, shape = (n, 3)
Points to which the transform should be applied.
tgt_pts : array, shape = (m, 3)
Points to which src_pts should be fitted. Each point in tgt_pts should
correspond to the point in src_pts with the same index.
rotate : bool
Allow rotation of the ``src_pts``.
translate : bool
Allow translation of the ``src_pts``.
scale : 0 | 1 | 3
Number of scaling parameters. With 0, points are not scaled. With 1,
points are scaled by the same factor along all axes. With 3, points are
scaled by a separate factor along each axis.
x0 : None | tuple
Initial values for the fit parameters.
leastsq_args : dict
Additional parameters to submit to :func:`scipy.optimize.leastsq`.
out : 'params' | 'trans'
In what format to return the estimate: 'params' returns a tuple with
the fit parameters; 'trans' returns a transformation matrix of shape
(4, 4).
Returns
-------
x : array, shape = (n_params, )
Estimated parameters for the transformation.
Notes
-----
Assumes that the target points form a dense enough point cloud so that
the distance of each src_pt to the closest tgt_pt can be used as an
estimate of the distance of src_pt to tgt_pts.
"""
kwargs = {'epsfcn': 0.01}
kwargs.update(leastsq_args)
# assert correct argument types
src_pts = np.atleast_2d(src_pts)
tgt_pts = np.atleast_2d(tgt_pts)
translate = bool(translate)
rotate = bool(rotate)
scale = int(scale)
if translate:
src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
try:
from sklearn.neighbors import BallTree
tgt_pts = BallTree(tgt_pts)
errfunc = _point_cloud_error_balltree
except ImportError:
warn("Sklearn could not be imported. Fitting points will be slower. "
"To improve performance, install the sklearn module.")
errfunc = _point_cloud_error
# for efficiency, define parameter specific error function
param_info = (rotate, translate, scale)
if param_info == (True, False, 0):
x0 = x0 or (0, 0, 0)
def error(x):
rx, ry, rz = x
trans = rotation3d(rx, ry, rz)
est = dot(src_pts, trans.T)
err = errfunc(est, tgt_pts)
return err
elif param_info == (True, False, 1):
x0 = x0 or (0, 0, 0, 1)
def error(x):
rx, ry, rz, s = x
trans = rotation3d(rx, ry, rz) * s
est = dot(src_pts, trans.T)
err = errfunc(est, tgt_pts)
return err
elif param_info == (True, False, 3):
x0 = x0 or (0, 0, 0, 1, 1, 1)
def error(x):
rx, ry, rz, sx, sy, sz = x
trans = rotation3d(rx, ry, rz) * [sx, sy, sz]
est = dot(src_pts, trans.T)
err = errfunc(est, tgt_pts)
return err
elif param_info == (True, True, 0):
x0 = x0 or (0, 0, 0, 0, 0, 0)
def error(x):
rx, ry, rz, tx, ty, tz = x
trans = dot(translation(tx, ty, tz), rotation(rx, ry, rz))
est = dot(src_pts, trans.T)
err = errfunc(est[:, :3], tgt_pts)
return err
else:
err = ("The specified parameter combination is not implemented: "
"rotate=%r, translate=%r, scale=%r" % param_info)
raise NotImplementedError(err)
est, _, info, msg, _ = leastsq(error, x0, full_output=True, **kwargs)
logger.debug("fit_point_cloud leastsq (%i calls) info: %s", info['nfev'],
msg)
if out == 'params':
return est
elif out == 'trans':
return _trans_from_params(param_info, est)
else:
err = ("Invalid out parameter: %r. Needs to be 'params' or "
"'trans'." % out)
raise ValueError(err)
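# Minimal usage sketch (placeholder arrays: digitized head-shape points
# fitted to a dense scalp surface):
#   params = fit_point_cloud(hsp_pts, scalp_pts, rotate=True, translate=True,
#                            scale=0)
#   trans = _trans_from_params((True, True, 0), params)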
def _find_label_paths(subject='fsaverage', pattern=None, subjects_dir=None):
"""Find paths to label files in a subject's label directory
Parameters
----------
subject : str
Name of the mri subject.
pattern : str | None
Pattern for finding the labels relative to the label directory in the
MRI subject directory (e.g., "aparc/*.label" will find all labels
in the "subject/label/aparc" directory). With None, find all labels.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
(sys.environ['SUBJECTS_DIR'])
Returns
    -------
paths : list
List of paths relative to the subject's label directory
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
subject_dir = os.path.join(subjects_dir, subject)
lbl_dir = os.path.join(subject_dir, 'label')
if pattern is None:
paths = []
for dirpath, _, filenames in os.walk(lbl_dir):
rel_dir = os.path.relpath(dirpath, lbl_dir)
for filename in fnmatch.filter(filenames, '*.label'):
path = os.path.join(rel_dir, filename)
paths.append(path)
else:
paths = [os.path.relpath(path, lbl_dir) for path in iglob(pattern)]
return paths
def _find_mri_paths(subject='fsaverage', subjects_dir=None):
"""Find all files of an mri relevant for source transformation
Parameters
----------
subject : str
Name of the mri subject.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
(sys.environ['SUBJECTS_DIR'])
Returns
-------
    paths : dict
Dictionary whose keys are relevant file type names (str), and whose
values are lists of paths.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
paths = {}
# directories to create
paths['dirs'] = [bem_dirname, surf_dirname]
# surf/ files
paths['surf'] = surf = []
surf_fname = os.path.join(surf_dirname, '{name}')
surf_names = ('orig', 'orig_avg',
'inflated', 'inflated_avg', 'inflated_pre',
'pial', 'pial_avg',
'smoothwm',
'white', 'white_avg',
'sphere', 'sphere.reg', 'sphere.reg.avg')
for name in surf_names:
for hemi in ('lh.', 'rh.'):
fname = pformat(surf_fname, name=hemi + name)
surf.append(fname)
# BEM files
paths['bem'] = bem = []
path = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject)
if os.path.exists(path):
bem.append('head')
bem_pattern = pformat(bem_fname, subjects_dir=subjects_dir,
subject=subject, name='*-bem')
re_pattern = pformat(bem_fname, subjects_dir=subjects_dir, subject=subject,
name='(.+)')
for path in iglob(bem_pattern):
match = re.match(re_pattern, path)
name = match.group(1)
bem.append(name)
# fiducials
paths['fid'] = [fid_fname]
# duplicate curvature files
paths['duplicate'] = dup = []
path = os.path.join(surf_dirname, '{name}')
for name in ['lh.curv', 'rh.curv']:
fname = pformat(path, name=name)
dup.append(fname)
# check presence of required files
for ftype in ['surf', 'fid', 'duplicate']:
for fname in paths[ftype]:
path = fname.format(subjects_dir=subjects_dir, subject=subject)
path = os.path.realpath(path)
if not os.path.exists(path):
raise IOError("Required file not found: %r" % path)
# find source space files
paths['src'] = src = []
bem_dir = bem_dirname.format(subjects_dir=subjects_dir, subject=subject)
fnames = fnmatch.filter(os.listdir(bem_dir), '*-src.fif')
prefix = subject + '-'
for fname in fnames:
if fname.startswith(prefix):
fname = "{subject}-%s" % fname[len(prefix):]
path = os.path.join(bem_dirname, fname)
src.append(path)
return paths
def _is_mri_subject(subject, subjects_dir=None):
"""Check whether a directory in subjects_dir is an mri subject directory
Parameters
----------
subject : str
Name of the potential subject/directory.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
is_mri_subject : bool
Whether ``subject`` is an mri subject.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
fname = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject)
if not os.path.exists(fname):
return False
return True
def _mri_subject_has_bem(subject, subjects_dir=None):
"""Check whether an mri subject has a file matching the bem pattern
Parameters
----------
subject : str
Name of the subject.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
has_bem_file : bool
Whether ``subject`` has a bem file.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
pattern = bem_fname.format(subjects_dir=subjects_dir, subject=subject,
name='*-bem')
fnames = glob(pattern)
return bool(len(fnames))
def read_elp(fname):
"""Read point coordinates from a text file
Parameters
----------
fname : str
Absolute path to laser point file (*.txt).
Returns
-------
elp_points : array, [n_points x 3]
Point coordinates.
"""
pattern = re.compile(r'(\-?\d+\.\d+)\s+(\-?\d+\.\d+)\s+(\-?\d+\.\d+)')
with open(fname) as fid:
elp_points = pattern.findall(fid.read())
elp_points = np.array(elp_points, dtype=float)
if elp_points.shape[1] != 3:
err = ("File %r does not contain 3 columns as required; got shape "
"%s." % (fname, elp_points.shape))
raise ValueError(err)
return elp_points
def read_mri_cfg(subject, subjects_dir=None):
"""Read information from the cfg file of a scaled MRI brain
Parameters
----------
subject : str
Name of the scaled MRI subject.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
cfg : dict
Dictionary with entries from the MRI's cfg file.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
fname = os.path.join(subjects_dir, subject, 'MRI scaling parameters.cfg')
if not os.path.exists(fname):
err = ("%r does not seem to be a scaled mri subject: %r does not "
"exist." % (subject, fname))
raise IOError(err)
logger.info("Reading MRI cfg file %s" % fname)
config = configparser.RawConfigParser()
config.read(fname)
n_params = config.getint("MRI Scaling", 'n_params')
if n_params == 1:
scale = config.getfloat("MRI Scaling", 'scale')
elif n_params == 3:
scale_str = config.get("MRI Scaling", 'scale')
scale = np.array([float(s) for s in scale_str.split()])
else:
raise ValueError("Invalid n_params value in MRI cfg: %i" % n_params)
out = {'subject_from': config.get("MRI Scaling", 'subject_from'),
'n_params': n_params, 'scale': scale}
return out
def _write_mri_config(fname, subject_from, subject_to, scale):
"""Write the cfg file describing a scaled MRI subject
Parameters
----------
fname : str
Target file.
subject_from : str
Name of the source MRI subject.
subject_to : str
Name of the scaled MRI subject.
scale : float | array_like, shape = (3,)
The scaling parameter.
"""
scale = np.asarray(scale)
if np.isscalar(scale) or scale.shape == ():
n_params = 1
else:
n_params = 3
config = configparser.RawConfigParser()
config.add_section("MRI Scaling")
config.set("MRI Scaling", 'subject_from', subject_from)
config.set("MRI Scaling", 'subject_to', subject_to)
config.set("MRI Scaling", 'n_params', str(n_params))
if n_params == 1:
config.set("MRI Scaling", 'scale', str(scale))
else:
config.set("MRI Scaling", 'scale', ' '.join([str(s) for s in scale]))
config.set("MRI Scaling", 'version', '1')
with open(fname, 'w') as fid:
config.write(fid)
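# The file written above is a plain ConfigParser document; for a uniform
# scale factor it looks roughly like this (values are illustrative):
#   [MRI Scaling]
#   subject_from = fsaverage
#   subject_to = my_subject
#   n_params = 1
#   scale = 0.95
#   version = 1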
def _scale_params(subject_to, subject_from, scale, subjects_dir):
subjects_dir = get_subjects_dir(subjects_dir, True)
if (subject_from is None) != (scale is None):
err = ("Need to provide either both subject_from and scale "
"parameters, or neither.")
raise TypeError(err)
if subject_from is None:
cfg = read_mri_cfg(subject_to, subjects_dir)
subject_from = cfg['subject_from']
n_params = cfg['n_params']
scale = cfg['scale']
else:
scale = np.asarray(scale)
if scale.ndim == 0:
n_params = 1
elif scale.shape == (3,):
n_params = 3
else:
err = ("Invalid shape for scale parameer. Need scalar or array of "
"length 3. Got %s." % str(scale))
raise ValueError(err)
return subjects_dir, subject_from, n_params, scale
def scale_bem(subject_to, bem_name, subject_from=None, scale=None,
subjects_dir=None):
"""Scale a bem file
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination mri subject).
bem_name : str
Name of the bem file. For example, to scale
``fsaverage-inner_skull-bem.fif``, the bem_name would be
"inner_skull-bem".
subject_from : None | str
The subject from which to read the source space. If None, subject_from
is read from subject_to's config file.
scale : None | float | array, shape = (3,)
        Scaling factor. Has to be specified if subject_from is specified,
otherwise it is read from subject_to's config file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
"""
subjects_dir, subject_from, _, scale = _scale_params(subject_to,
subject_from, scale,
subjects_dir)
src = bem_fname.format(subjects_dir=subjects_dir, subject=subject_from,
name=bem_name)
dst = bem_fname.format(subjects_dir=subjects_dir, subject=subject_to,
name=bem_name)
if os.path.exists(dst):
raise IOError("File alredy exists: %s" % dst)
surfs = read_bem_surfaces(src)
if len(surfs) != 1:
err = ("BEM file with more than one surface: %r" % src)
raise NotImplementedError(err)
surf0 = surfs[0]
surf0['rr'] = surf0['rr'] * scale
write_bem_surface(dst, surf0)
def scale_labels(subject_to, pattern=None, overwrite=False, subject_from=None,
scale=None, subjects_dir=None):
"""Scale labels to match a brain that was previously created by scaling
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination brain).
pattern : str | None
Pattern for finding the labels relative to the label directory in the
MRI subject directory (e.g., "lh.BA3a.label" will scale
"fsaverage/label/lh.BA3a.label"; "aparc/*.label" will find all labels
in the "fsaverage/label/aparc" directory). With None, scale all labels.
overwrite : bool
Overwrite any label file that already exists for subject_to (otherwise
        existing labels are skipped).
subject_from : None | str
Name of the original MRI subject (the brain that was scaled to create
subject_to). If None, the value is read from subject_to's cfg file.
scale : None | float | array_like, shape = (3,)
Scaling parameter. If None, the value is read from subject_to's cfg
file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
"""
# read parameters from cfg
if scale is None or subject_from is None:
cfg = read_mri_cfg(subject_to, subjects_dir)
if subject_from is None:
subject_from = cfg['subject_from']
if scale is None:
scale = cfg['scale']
# find labels
paths = _find_label_paths(subject_from, pattern, subjects_dir)
if not paths:
return
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
src_root = os.path.join(subjects_dir, subject_from, 'label')
dst_root = os.path.join(subjects_dir, subject_to, 'label')
# scale labels
for fname in paths:
dst = os.path.join(dst_root, fname)
if not overwrite and os.path.exists(dst):
continue
dirname = os.path.dirname(dst)
if not os.path.exists(dirname):
os.makedirs(dirname)
src = os.path.join(src_root, fname)
l_old = read_label(src)
pos = l_old.pos * scale
l_new = Label(l_old.vertices, pos, l_old.values, l_old.hemi,
l_old.comment, subject=subject_to)
l_new.save(dst)
def scale_mri(subject_from, subject_to, scale, overwrite=False,
subjects_dir=None):
"""Create a scaled copy of an MRI subject
Parameters
----------
subject_from : str
Name of the subject providing the MRI.
subject_to : str
New subject name for which to save the scaled MRI.
scale : float | array_like, shape = (3,)
The scaling factor (one or 3 parameters).
overwrite : bool
If an MRI already exists for subject_to, overwrite it.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
See Also
--------
scale_labels : add labels to a scaled MRI
scale_source_space : add a source space to a scaled MRI
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
paths = _find_mri_paths(subject_from, subjects_dir=subjects_dir)
scale = np.asarray(scale)
# make sure we have an empty target directory
dest = subject_dirname.format(subject=subject_to,
subjects_dir=subjects_dir)
if os.path.exists(dest):
if overwrite:
shutil.rmtree(dest)
else:
err = ("Subject directory for %s already exists: "
"%r" % (subject_to, dest))
raise IOError(err)
for dirname in paths['dirs']:
dir_ = dirname.format(subject=subject_to, subjects_dir=subjects_dir)
os.makedirs(dir_)
# save MRI scaling parameters
fname = os.path.join(dest, 'MRI scaling parameters.cfg')
_write_mri_config(fname, subject_from, subject_to, scale)
# surf files [in mm]
for fname in paths['surf']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
src = os.path.realpath(src)
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
pts, tri = read_surface(src)
write_surface(dest, pts * scale, tri)
# BEM files [in m]
for bem_name in paths['bem']:
scale_bem(subject_to, bem_name, subject_from, scale, subjects_dir)
# fiducials [in m]
for fname in paths['fid']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
src = os.path.realpath(src)
pts, cframe = read_fiducials(src)
for pt in pts:
pt['r'] = pt['r'] * scale
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
write_fiducials(dest, pts, cframe)
# duplicate files
for fname in paths['duplicate']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
shutil.copyfile(src, dest)
# source spaces
for fname in paths['src']:
src_name = os.path.basename(fname)
scale_source_space(subject_to, src_name, subject_from, scale,
subjects_dir)
# labels [in m]
scale_labels(subject_to, subject_from=subject_from, scale=scale,
subjects_dir=subjects_dir)
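# Minimal usage sketch (subject name and scale factor are illustrative):
#   scale_mri('fsaverage', 'fsaverage_scaled', scale=0.95, overwrite=True)
# This creates <subjects_dir>/fsaverage_scaled with scaled surfaces, BEM
# surfaces, fiducials, source spaces and labels, plus the cfg file read by
# read_mri_cfg().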
def scale_source_space(subject_to, src_name, subject_from=None, scale=None,
subjects_dir=None, n_jobs=1):
"""Scale a source space for an mri created with scale_mri()
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination mri subject).
src_name : str
Source space name. Can be a spacing parameter (e.g., ``'7'``,
``'ico4'``, ``'oct6'``) or a file name of a source space file relative
to the bem directory; if the file name contains the subject name, it
should be indicated as "{subject}" in ``src_name`` (e.g.,
``"{subject}-my_source_space-src.fif"``).
subject_from : None | str
The subject from which to read the source space. If None, subject_from
is read from subject_to's config file.
scale : None | float | array, shape = (3,)
        Scaling factor. Has to be specified if subject_from is specified,
otherwise it is read from subject_to's config file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
n_jobs : int
        Number of jobs to run in parallel if recomputing distances (only
applies if scale is an array of length 3, and will not use more cores
than there are source spaces).
"""
subjects_dir, subject_from, n_params, scale = _scale_params(subject_to,
subject_from,
scale,
subjects_dir)
# find the source space file names
if src_name.isdigit():
spacing = src_name # spacing in mm
src_pattern = src_fname
else:
match = re.match("(oct|ico)-?(\d+)$", src_name)
if match:
spacing = '-'.join(match.groups())
src_pattern = src_fname
else:
spacing = None
src_pattern = os.path.join(bem_dirname, src_name)
src = src_pattern.format(subjects_dir=subjects_dir, subject=subject_from,
spacing=spacing)
dst = src_pattern.format(subjects_dir=subjects_dir, subject=subject_to,
spacing=spacing)
# prepare scaling parameters
if n_params == 1:
norm_scale = None
elif n_params == 3:
norm_scale = 1. / scale
else:
err = ("Invalid n_params entry in MRI cfg file: %s" % str(n_params))
raise RuntimeError(err)
# read and scale the source space [in m]
sss = read_source_spaces(src)
logger.info("scaling source space %s: %s -> %s", spacing, subject_from,
subject_to)
logger.info("Scale factor: %s", scale)
add_dist = False
for ss in sss:
ss['subject_his_id'] = subject_to
ss['rr'] *= scale
# distances and patch info
if norm_scale is None:
if ss['dist'] is not None:
ss['dist'] *= scale
ss['nearest_dist'] *= scale
ss['dist_limit'] *= scale
else:
nn = ss['nn']
nn *= norm_scale
norm = np.sqrt(np.sum(nn ** 2, 1))
nn /= norm[:, np.newaxis]
if ss['dist'] is not None:
add_dist = True
if add_dist:
logger.info("Recomputing distances, this might take a while")
dist_limit = np.asscalar(sss[0]['dist_limit'])
add_source_space_distances(sss, dist_limit, n_jobs)
write_source_spaces(dst, sss)
| bsd-2-clause | -7,231,553,313,191,468,000 | 34.259516 | 79 | 0.5934 | false | 3.698394 | true | false | false |
nthien/flaskup | flaskup/console.py | 2 | 1462 | # -*- coding: utf-8 -*-
import os
import argparse
from datetime import date
from flaskup.models import SharedFile
from flaskup.filters import filesizeformat
def action_clean(quiet):
today = date.today()
count = 0
deleted_files = []
for f in SharedFile.find_all():
if f.expire_date < today:
f.delete(notify=False)
count += 1
deleted_files.append(f)
if not quiet and count > 0:
print u'Files deleted: {0}'.format(count)
for info in deleted_files:
print u" - '{0}' - {1}".format(os.path.join(info.path, info.filename),
filesizeformat(info.size, True))
def list_actions():
from flaskup import console
attributes = dir(console)
actions = []
for attribute in attributes:
if attribute.startswith('action_'):
actions.append(attribute[7:])
return actions
def main():
# parse arguments
parser = argparse.ArgumentParser(description='Flaskup! command line tool.')
parser.add_argument('-q', '--quiet',
action='store_true',
help='quiet, print only errors')
choices = list_actions()
parser.add_argument('action', choices=choices)
args = parser.parse_args()
# quiet?
quiet = args.quiet
# call function
from flaskup import console
action = getattr(console, 'action_' + args.action)
action(quiet)
| bsd-3-clause | -4,315,482,644,957,705,700 | 25.581818 | 82 | 0.596443 | false | 4.189112 | false | false | false |
jimmy201602/webterminal | permission/commons.py | 1 | 3118 | from django.contrib.auth.models import Permission
from django.utils.translation import ugettext_lazy as _
def parse_permission_tree():
permission_tree = {}
permission_tree_list = []
queryset = Permission.objects.filter(content_type__app_label__in=[
'common', 'permission'], codename__contains='can_')
for i in ['common', 'permission']:
for p in queryset.filter(content_type__app_label=i):
if 'text' in permission_tree.keys():
if p.content_type.model not in [i['model'] for i in permission_tree['children']]:
permission_tree['children'].append({
"text": _(p.content_type.model),
"icon": "fa fa-folder",
"state": {"selected": "!0"},
"app_label": p.content_type.app_label,
"model": p.content_type.model,
'level': 'two',
'children': [{
"text": _(p.name),
"icon": "fa fa-folder",
"state": {"selected": "!0"},
"id": p.id,
"app_label": p.content_type.app_label,
"model": p.content_type.model,
'level': 'three'
}]
})
else:
for i in permission_tree['children']:
if i['model'] == p.content_type.model:
permission_tree['children'][permission_tree['children'].index(i)]['children'].append({
"text": _(p.name),
"icon": "fa fa-folder",
"state": {"selected": "!0"},
"id": p.id,
"app_label": p.content_type.app_label,
"model": p.content_type.model,
'level': 'three'
})
else:
permission_tree['text'] = i
permission_tree['level'] = 'one'
permission_tree['children'] = []
permission_tree['children'].append({
"text": _(p.content_type.model),
"icon": "fa fa-folder",
"app_label": p.content_type.app_label,
"model": p.content_type.model,
"state": {"selected": "!0"},
'level': 'two',
'children': [{
"text": _(p.name),
"icon": "fa fa-folder",
"state": {"selected": "!0"},
"id": p.id,
"app_label": p.content_type.app_label,
"model": p.content_type.model,
'level': 'three'
}]
})
permission_tree_list.append(permission_tree)
permission_tree = {}
return permission_tree_list
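# Shape of the returned structure (names and ids are illustrative): a list of
# nested dicts ready to feed a jsTree-style widget, e.g.
#   [{'text': 'common', 'level': 'one', 'children': [
#       {'text': 'shellcommand', 'level': 'two', 'model': 'shellcommand',
#        'children': [{'text': 'Can execute command', 'id': 42,
#                      'level': 'three'}]}]}]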
| gpl-3.0 | -1,413,970,611,561,117,400 | 46.242424 | 114 | 0.395125 | false | 4.849145 | false | false | false |
revesansparole/oacontainer | src/openalea/container/property_graph.py | 1 | 9223 | # -*- python -*-
#
# OpenAlea.Core
#
# Copyright 2006-2009 INRIA - CIRAD - INRA
#
# File author(s): Fred Boudon <[email protected]>
#
# Distributed under the Cecill-C License.
# See accompanying file LICENSE.txt or copy at
# http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html
#
# OpenAlea WebSite: http://openalea.gforge.inria.fr
#
################################################################################
"""This module provide a set of concepts to add properties to graph elements.
TODO: stupid implementation that does not ensure that ids in properties are valid
graph elements.
"""
from graph import Graph, InvalidVertex, InvalidEdge
class InvalidProperty(Exception):
"""Exception used when a property is missing."""
pass
class PropertyGraph(Graph):
"""Simple implementation of PropertyGraph using
dict as properties and two dictionaries to
maintain these properties
"""
def __init__(self, graph=None, **kwds):
self._vertex_property = {}
self._edge_property = {}
self._graph_property = {}
Graph.__init__(self, graph, **kwds)
def vertex_property_names(self):
"""Names of properties associated to vertices.
return:
- (iter of str)
"""
return self._vertex_property.iterkeys()
def vertex_properties(self):
"""Iterate on all properties associated to vertices.
return:
- (iter of dict of (vid, any))
"""
return self._vertex_property.items()
def vertex_property(self, property_name):
"""Return a map between vid and data for all vertices where
property_name is defined
args:
- property_name (str): name identifier of the property
return:
- (dict of (vid, any))
"""
try:
return self._vertex_property[property_name]
except KeyError:
raise InvalidProperty("property %s is undefined on vertices"
% property_name)
def edge_property_names(self):
"""Names of properties associated to edges.
return:
- (iter of str)
"""
return self._edge_property.iterkeys()
def edge_properties(self):
"""Iterate on all properties associated to edges.
return:
- (iter of dict of (eid, any))
"""
return self._edge_property.items()
def edge_property(self, property_name):
"""Return a map between eid and data for all edges where
property_name is defined
args:
- property_name (str): name identifier of the property
return:
- (dict of (eid, any))
"""
try:
return self._edge_property[property_name]
except KeyError:
raise InvalidProperty("property %s is undefined on edges"
% property_name)
def graph_property_names(self):
"""Names of properties associated to the graph.
return:
- (iter of str)
"""
return self._graph_property.iterkeys()
def graph_properties(self):
"""Iterate on all properties associated to the graph.
return:
- (iter of (str, any))
"""
return self._graph_property.iteritems()
def graph_property(self, property_name):
"""Return the value of a property associated to the graph.
args:
- property_name (str): name identifier of the property
return:
- (any)
"""
try:
return self._graph_property[property_name]
except KeyError:
raise InvalidProperty("property %s is undefined on graph"
% property_name)
###########################################################
#
# mutable property concept
#
###########################################################
def add_vertex_property(self, property_name, values=None):
"""Add a new map between vid and a data.
args:
- property_name (str): name identifier for this property
        - values (dict of (vid, any)): pre-set values for some vertices.
          If None (default), property will be empty.
"""
if property_name in self._vertex_property:
raise InvalidProperty("property %s is already defined on vertices"
% property_name)
if values is None:
values = {}
self._vertex_property[property_name] = values
def remove_vertex_property(self, property_name):
"""Remove a given property.
args:
- property_name (str): name identifier for this property
"""
try:
del self._vertex_property[property_name]
except KeyError:
raise InvalidProperty("property %s is undefined on vertices"
% property_name)
def add_edge_property(self, property_name, values=None):
"""Add a new map between eid and a data.
args:
- property_name (str): name identifier for this property
        - values (dict of (eid, any)): pre-set values for some edges.
          If None (default), property will be empty.
"""
if property_name in self._edge_property:
raise InvalidProperty("property %s is already defined on edges"
% property_name)
if values is None:
values = {}
self._edge_property[property_name] = values
def remove_edge_property(self, property_name):
"""Remove a given property.
args:
- property_name (str): name identifier for this property
"""
try:
del self._edge_property[property_name]
except KeyError:
raise InvalidProperty("property %s is undefined on edges"
% property_name)
def add_graph_property(self, property_name, value=None):
"""Add a new property to the graph.
args:
- property_name (str): name identifier for the property
        - value (any): value (default None) associated to this property
"""
if property_name in self._graph_property:
raise InvalidProperty("property %s is already defined on graph"
% property_name)
self._graph_property[property_name] = value
def remove_graph_property(self, property_name):
"""Remove a given property.
args:
- property_name (str): name identifier for this property
"""
try:
del self._graph_property[property_name]
except KeyError:
raise InvalidProperty("property %s is undefined on graph"
% property_name)
###########################################################
#
# mutable property concept
#
###########################################################
def remove_vertex(self, vid):
for prop in self._vertex_property.itervalues():
prop.pop(vid, None)
Graph.remove_vertex(self, vid)
# remove_vertex.__doc__ = Graph.remove_vertex.__doc__
def remove_edge(self, eid):
for prop in self._edge_property.itervalues():
prop.pop(eid, None)
Graph.remove_edge(self, eid)
# remove_edge.__doc__ = Graph.remove_edge.__doc__
def clear(self):
for prop in self._vertex_property.itervalues():
prop.clear()
for prop in self._edge_property.itervalues():
prop.clear()
self._graph_property.clear()
Graph.clear(self)
# clear.__doc__ = Graph.clear.__doc__
def clear_edges(self):
for prop in self._edge_property.itervalues():
prop.clear()
Graph.clear_edges(self)
# clear_edges.__doc__ = Graph.clear_edges.__doc__
def extend(self, graph):
# add and translate the vertex and edge ids of the second graph
trans_vid, trans_eid = Graph.extend(self, graph)
if isinstance(graph, PropertyGraph):
# update graph properties
for name, prop in graph.vertex_properties():
if name not in self.vertex_property_names():
self.add_vertex_property(name)
self_prop = self.vertex_property(name)
for vid, data in prop.items():
self_prop[trans_vid[vid]] = data
# update edge properties
for name, prop in graph.edge_properties():
if name not in self.edge_property_names():
self.add_edge_property(name)
self_prop = self.edge_property(name)
for eid, data in prop.items():
self_prop[trans_eid[eid]] = data
# update graph properties
for name, data in graph.graph_properties():
if name not in self.graph_properties():
self.add_graph_property(name, data)
return trans_vid, trans_eid
# extend.__doc__ = Graph.extend.__doc__
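# Minimal usage sketch (assumes the parent Graph class provides add_vertex()
# and add_edge(), as in openalea.container; ids and labels are illustrative):
#   g = PropertyGraph()
#   v1, v2 = g.add_vertex(), g.add_vertex()
#   g.add_edge(v1, v2)
#   g.add_vertex_property('label', {v1: 'soma', v2: 'axon'})
#   g.vertex_property('label')[v1]   # -> 'soma'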
| mit | -70,432,156,574,563,224 | 31.475352 | 80 | 0.545809 | false | 4.620741 | false | false | false |
jds2001/sos | sos/plugins/mongodb.py | 1 | 1619 | # Copyright (C) 2014 Red Hat, Inc., Bryn M. Reeves <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class MongoDb(Plugin, DebianPlugin, UbuntuPlugin):
"""MongoDB document database
"""
plugin_name = 'mongodb'
profiles = ('services',)
packages = ('mongodb-server',)
files = ('/etc/mongodb.conf',)
def setup(self):
self.add_copy_spec([
"/etc/mongodb.conf",
"/var/log/mongodb/mongodb.log",
"/var/log/containers/mongodb/mongodb.log"
])
def postproc(self):
self.do_file_sub(
"/etc/mongodb.conf",
r"(mms-token\s*=\s*.*)",
r"mms-token = ********"
)
class RedHatMongoDb(MongoDb, RedHatPlugin):
def setup(self):
super(RedHatMongoDb, self).setup()
self.add_copy_spec("/etc/sysconfig/mongodb")
# vim: set et ts=4 sw=4 :
| gpl-2.0 | 880,026,662,020,041,000 | 30.745098 | 73 | 0.662137 | false | 3.679545 | false | false | false |
wasade/american-gut-web | amgut/handlers/auth_handlers.py | 1 | 4373 | #!/usr/bin/env python
from tornado.web import authenticated
from tornado.escape import json_encode
from amgut.util import AG_DATA_ACCESS
from amgut.lib.mail import send_email
from amgut.handlers.base_handlers import BaseHandler
from amgut import media_locale, text_locale
# login code modified from https://gist.github.com/guillaumevincent/4771570
class AuthRegisterHandoutHandler(BaseHandler):
"""User Creation"""
@authenticated
def get(self):
latlong_db = AG_DATA_ACCESS.getMapMarkers()
self.render("register_user.html", skid=self.current_user,
latlongs_db=latlong_db, loginerror='')
@authenticated
def post(self):
skid = self.current_user
        tl = text_locale['handlers']
info = {}
for info_column in ("email", "participantname", "address", "city",
"state", "zip", "country"):
# Make sure that all fields were entered
info[info_column] = self.get_argument(info_column, None)
# create the user if needed
ag_login_id = AG_DATA_ACCESS.addAGLogin(
info['email'], info['participantname'], info['address'],
info['city'], info['state'], info['zip'], info['country'])
# Create the kit and add the kit to the user
kitinfo = AG_DATA_ACCESS.getAGHandoutKitDetails(skid)
printresults = AG_DATA_ACCESS.checkPrintResults(skid)
if printresults is None:
printresults = 'n'
success = AG_DATA_ACCESS.addAGKit(
ag_login_id, skid, kitinfo['password'],
kitinfo['swabs_per_kit'], kitinfo['verification_code'],
printresults)
if success == -1:
self.redirect(media_locale['SITEBASE'] + '/db_error/?err=regkit')
return
# Add the barcodes
kitinfo = AG_DATA_ACCESS.getAGKitDetails(skid)
ag_kit_id = kitinfo['ag_kit_id']
results = AG_DATA_ACCESS.get_barcodes_from_handout_kit(skid)
for row in results:
barcode = row[0]
success = AG_DATA_ACCESS.addAGBarcode(ag_kit_id, barcode)
if success == -1:
self.redirect(media_locale['SITEBASE'] + '/db_error/?err=regbarcode')
return
# Email the verification code
subject = tl['AUTH_SUBJECT']
addendum = ''
if skid.startswith('PGP_'):
addendum = tl['AUTH_REGISTER_PGP']
body = tl['AUTH_REGISTER_BODY'].format(
kitinfo['kit_verification_code'], addendum)
result = tl['KIT_REG_SUCCESS']
try:
send_email(body, subject, recipient=info['email'],
sender=media_locale['HELP_EMAIL'])
except:
result = media_locale['EMAIL_ERROR']
self.render('help_request.html', skid=skid, result=result)
self.redirect(media_locale['SITEBASE'] + '/authed/portal/')
class AuthLoginHandler(BaseHandler):
"""user login, no page necessary"""
def post(self):
skid = self.get_argument("skid", "").strip()
password = self.get_argument("password", "")
tl = text_locale['handlers']
login = AG_DATA_ACCESS.authenticateWebAppUser(skid, password)
if login:
# everything good so log in
self.set_current_user(skid)
self.redirect(media_locale['SITEBASE'] + "/authed/portal/")
return
else:
is_handout = AG_DATA_ACCESS.handoutCheck(skid, password)
if is_handout == 'y':
# login user but have them register themselves
self.set_current_user(skid)
self.redirect(media_locale['SITEBASE'] + '/auth/register/')
return
else:
msg = tl['INVALID_KITID']
latlongs_db = AG_DATA_ACCESS.getMapMarkers()
self.render("index.html", user=None, loginerror=msg,
latlongs_db=latlongs_db)
return
def set_current_user(self, user):
if user:
self.set_secure_cookie("skid", json_encode(user))
else:
self.clear_cookie("skid")
class AuthLogoutHandler(BaseHandler):
"""Logout handler, no page necessary"""
def get(self):
self.clear_cookie("skid")
self.redirect(media_locale['SITEBASE'] + "/")
| bsd-3-clause | -3,059,562,258,704,055,000 | 36.059322 | 85 | 0.584038 | false | 3.822552 | false | false | false |
felipenaselva/repo.felipe | plugin.video.uwc/favorites.py | 1 | 2571 | '''
Ultimate Whitecream
Copyright (C) 2015 mortael
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib, urllib2, re, cookielib, os.path, sys, socket
import xbmc, xbmcplugin, xbmcgui, xbmcaddon, sqlite3
import utils
from chaturbate import clean_database as cleanchat
from cam4 import clean_database as cleancam4
dialog = utils.dialog
favoritesdb = utils.favoritesdb
conn = sqlite3.connect(favoritesdb)
c = conn.cursor()
try:
c.executescript("CREATE TABLE IF NOT EXISTS favorites (name, url, mode, image);")
c.executescript("CREATE TABLE IF NOT EXISTS keywords (keyword);")
except:
pass
conn.close()
def List():
if utils.addon.getSetting("chaturbate") == "true":
cleanchat()
cleancam4()
conn = sqlite3.connect(favoritesdb)
conn.text_factory = str
c = conn.cursor()
try:
c.execute("SELECT * FROM favorites")
for (name, url, mode, img) in c.fetchall():
utils.addDownLink(name, url, int(mode), img, '', '', 'del')
conn.close()
xbmcplugin.endOfDirectory(utils.addon_handle)
except:
conn.close()
utils.notify('No Favorites','No Favorites found')
return
def Favorites(fav,mode,name,url,img):
if fav == "add":
delFav(url)
addFav(mode, name, url, img)
utils.notify('Favorite added','Video added to the favorites')
elif fav == "del":
delFav(url)
utils.notify('Favorite deleted','Video removed from the list')
xbmc.executebuiltin('Container.Refresh')
def addFav(mode,name,url,img):
conn = sqlite3.connect(favoritesdb)
conn.text_factory = str
c = conn.cursor()
c.execute("INSERT INTO favorites VALUES (?,?,?,?)", (name, url, mode, img))
conn.commit()
conn.close()
def delFav(url):
conn = sqlite3.connect(favoritesdb)
c = conn.cursor()
c.execute("DELETE FROM favorites WHERE url = '%s'" % url)
conn.commit()
conn.close()
| gpl-2.0 | -3,299,589,569,647,617,000 | 28.215909 | 85 | 0.668222 | false | 3.693966 | false | false | false |
getefesto/efesto | tests/unit/handlers/BaseHandler.py | 1 | 1059 | # -*- coding: utf-8 -*-
from efesto.handlers import BaseHandler
from pytest import fixture
@fixture
def handler(magic):
handler = BaseHandler(magic())
handler.q = magic()
return handler
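# The fixture above wires a BaseHandler to stand-ins produced by the shared
# `magic` fixture (presumably a MagicMock factory) for both the model and the
# query object, so the tests below only assert on how those mocks get called.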
def test_basehandler_init(magic):
model = magic()
handler = BaseHandler(model)
assert handler.model == model
assert handler._order == model.id
def test_basehandler_embeds(handler, magic):
model = magic(one=magic(spec_set=['rel_model']))
handler.model = model
result = handler.embeds({'_embeds': 'one'})
handler.model.q.join.assert_called_with(model.one.rel_model, on=False)
assert result == ['one']
def test_basehandler_embeds_reverse(handler):
"""
Verifies that embeds work with backrefs.
"""
result = handler.embeds({'_embeds': 'one'})
model = handler.model
model.one.field = 'field'
handler.model.q.join.assert_called_with(model, on=False)
assert result == ['one']
def test_basehandler_embeds_none(handler):
result = handler.embeds({'_embeds': None})
assert result == []
| gpl-3.0 | 1,938,557,776,759,132,400 | 24.214286 | 74 | 0.663834 | false | 3.49505 | false | false | false |
qinjunjerry/PyKeyBox | keybox.py | 1 | 15148 | #!/usr/bin/env python
"""A mini key/password manager written in python using the AES encryption algorithm."""
import os
import sys
import time
import os.path
import random
import sqlite3
import hashlib
import getpass
import argparse
import Crypto.Cipher.AES
class KeyBox(object):
TABLE_NAME = "keybox"
MASTER_KEY_TITLE = "<MASTER>"
def __init__(self, a_file):
# the AES key of the master password, to encrypt key content
self.aes_key = None
self.conn = sqlite3.connect(a_file)
# Use 8-bit string instead of unicode string, in order to read/write
# international characters like Chinese
self.conn.text_factory = str
# The following line would use unicode string
# self.conn.text_factory = lambda x: unicode(x, 'utf-8', 'ignore')
self.cursor = self.conn.cursor()
self.cursor.execute('CREATE TABLE IF NOT EXISTS %s (title TEXT PRIMARY KEY, time LONG, content BLOB)' %
KeyBox.TABLE_NAME)
self.conn.commit()
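        # Schema note: all entries live in this single table keyed by title; the
        # master password occupies the reserved title "<MASTER>", whose content is
        # the SHA-256 hex digest of the AES key rather than an encrypted blob (see
        # init_master_password / verify_master_password below).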
def list(self):
title_time_list = []
self.cursor.execute('SELECT title,time FROM %s ORDER BY time DESC' % KeyBox.TABLE_NAME)
for row in self.cursor:
if row[0] != KeyBox.MASTER_KEY_TITLE:
title_time_list.append((row[0], row[1]))
return title_time_list
def search(self, keywords):
keywords_lower = {keyword.lower() for keyword in keywords}
matching_title_time_list = []
for title, mod_time in self.list():
title_lower = title.lower()
match = True
for keyword in keywords_lower:
if title_lower.find(keyword) == -1:
match = False
break
if match:
matching_title_time_list.append((title, mod_time))
return matching_title_time_list
def exists(self, title):
self.cursor.execute("SELECT time FROM %s WHERE title=?" % KeyBox.TABLE_NAME, (title,))
return self.cursor.fetchone() is not None
def init_master_password(self, table=TABLE_NAME):
password = input_password("Create a new master password: ")
if password == input_password("Confirm the master password: "):
self.aes_key = hashlib.sha256(password).digest()
# the hash of the AES key, stored in db for master password verification
key_hash = hashlib.sha256(self.aes_key).hexdigest()
self.cursor.execute("INSERT OR REPLACE INTO %s VALUES (?,?,?)" % table,
(KeyBox.MASTER_KEY_TITLE, time.time(), key_hash))
self.conn.commit()
else:
exit_with_error("Error: password not match, please retry")
def verify_master_password(self):
# get the stored key hash
self.cursor.execute("SELECT content FROM %s WHERE title=?"
% KeyBox.TABLE_NAME, (KeyBox.MASTER_KEY_TITLE,))
stored_key_hash = self.cursor.fetchone()[0]
# input master password
password = input_password("Master password: ")
self.aes_key = hashlib.sha256(password).digest()
# compare key hash
if hashlib.sha256(self.aes_key).hexdigest() != stored_key_hash:
exit_with_error("Error: incorrect master password, please retry")
def view(self, title):
self.cursor.execute("SELECT time, content FROM %s WHERE title=?"
% KeyBox.TABLE_NAME, (title,))
mod_time, encrypted = self.cursor.fetchone()
return mod_time, decrypt(encrypted, self.aes_key)
    def set(self, title, plain, mod_time=None, table=TABLE_NAME):
        # default the modification time at call time; a time.time() default in the
        # signature would be evaluated only once, when the method is defined
        if mod_time is None:
            mod_time = time.time()
        # append a trailing newline for better print effect
        if plain[-1] != "\n":
            plain += "\n"
encrypted = encrypt(plain, self.aes_key)
self.cursor.execute("INSERT OR REPLACE INTO %s VALUES (?,?,?)" % table,
(title, mod_time, sqlite3.Binary(encrypted)))
self.conn.commit()
def delete(self, title):
mod_time, plain = self.view(title)
self.cursor.execute("DELETE FROM %s WHERE title=?" % KeyBox.TABLE_NAME, (title,))
self.conn.commit()
return mod_time, plain
def reset(self):
tmp_table = "_tmp_"
self.cursor.execute('DROP TABLE IF EXISTS %s' % tmp_table)
self.cursor.execute('CREATE TABLE %s (title TEXT PRIMARY KEY, time LONG, content BLOB)' % tmp_table)
keys = []
for title, mod_time in self.list():
            content = self.view(title)[1]  # keep only the decrypted content, not the time
keys.append((title, mod_time, content))
self.init_master_password(table=tmp_table)
for title, mod_time, content in keys:
self.set(title, content, mod_time=mod_time, table=tmp_table)
self.cursor.execute("DROP TABLE %s" % KeyBox.TABLE_NAME)
self.cursor.execute("ALTER TABLE %s RENAME TO %s" % (tmp_table, KeyBox.TABLE_NAME))
self.conn.commit()
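    # reset() therefore re-encrypts every entry under a newly chosen master
    # password: entries are decrypted with the old key, written into a temporary
    # table with the new key, and the temporary table then replaces the original.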
def input_content(title):
sys.stdout.write("Input content of '%s', enter an empty line to finish:\n" % title)
lines = []
while True:
line = raw_input()
if line:
lines.append(line)
else:
break
return '\n'.join(lines)
def input_password(text):
password = getpass.getpass(text)
if password == "":
exit_with_error("Error: password not given")
return password
def encrypt(plain, aes_key):
iv = ''.join(chr(random.randint(0, 0xFF)) for _ in range(Crypto.Cipher.AES.block_size))
cipher = Crypto.Cipher.AES.AESCipher(aes_key, Crypto.Cipher.AES.MODE_CFB, iv)
return iv + cipher.encrypt(plain)
def decrypt(encrypted, aes_key):
iv = encrypted[0:Crypto.Cipher.AES.block_size]
cipher = Crypto.Cipher.AES.AESCipher(aes_key, Crypto.Cipher.AES.MODE_CFB, iv)
return cipher.decrypt(encrypted[Crypto.Cipher.AES.block_size:])
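# Illustrative round-trip of the two helpers above (not part of the program;
# "secret" and the sample password are made-up values):
#
#     key = hashlib.sha256("some master password").digest()
#     blob = encrypt("secret\n", key)          # blob = random IV + CFB ciphertext
#     assert decrypt(blob, key) == "secret\n"  # the IV is read back from the prefix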
def read_keys(a_file):
"""
Supported text file format is as follows:
KEY: key title1
MOD: 1540820240.0
key content line 11
key content line 12
...
KEY: key title2
key content line 21
key content line 22
...
"""
keys = []
with open(a_file, 'r') as fd:
title = ''
mod_time = -1
content_lines = []
for line in fd:
line = line.strip()
if line.startswith("KEY: "): # title line
if title != '' and content_lines != []:
# remove the empty lines at the end
                    while len(content_lines) > 0 and content_lines[-1] == "":
content_lines = content_lines[:-1]
# add to keys for return
if mod_time < 0: mod_time = time.time()
keys.append((title, mod_time, '\n'.join([aLine for aLine in content_lines])))
                # set next key title, clear content and reset the modification time
                title = line[5:]
                mod_time = -1
                content_lines = []
elif line.startswith("MOD: "):
mod_time = float(line[5:])
elif title != "":
content_lines.append(line)
else:
sys.stderr.write("Warn: line '%s' ignored: title missing\n" % line)
# process the last key
if title != '' and content_lines != []:
# remove the empty lines at the end
            while len(content_lines) > 0 and content_lines[-1] == "":
content_lines = content_lines[:-1]
# add to keys for return
if mod_time < 0: mod_time = time.time()
keys.append((title, mod_time, '\n'.join([aLine for aLine in content_lines])))
return keys
def exit_with_error(err_msg, err_code=-1):
sys.stderr.write(err_msg + "\n")
sys.exit(err_code)
def get_default_db_file():
keybox_file = "%s/.keybox" % os.environ['HOME']
if not os.path.exists(keybox_file):
return "%s/%s.keybox" % (os.environ['HOME'], os.environ['USER'])
with open(keybox_file, 'r') as fd:
for line in fd:
return line
def set_default_db_file(a_file):
keybox_file = "%s/.keybox" % os.environ['HOME']
with open(keybox_file, 'w') as fd:
fd.write(os.path.abspath(a_file))
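# Note: ~/.keybox is not a key database itself; it only records the absolute path
# of the most recently used database so later runs default to the same file
# (falling back to ~/<user>.keybox when no path has been recorded yet).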
def main():
# parse command line arguments
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-d', '--database',
help=('the sqlite database file to store keys. ' +
'Default: the previously used database file (see its location in %s/.keybox), ' +
'or %s/%s.keybox') % (os.environ["HOME"], os.environ["HOME"], os.environ['USER']))
subparsers = parser.add_subparsers(title="sub_commands", dest="action",
metavar='help|list|view|add|mod|del|import|export|reset')
subparsers.add_parser("help", help="show this help message and exit")
subparsers.add_parser("list", help="list all key titles (this is default)")
sub_parser = subparsers.add_parser("add", help="add a new key title and content")
sub_parser.add_argument("title", help="a key title")
sub_parser = subparsers.add_parser("view", help="view the content for the key title matching the given keywords")
sub_parser.add_argument("keyword", nargs="+", help="a keyword")
sub_parser = subparsers.add_parser("mod", help="modify the content for the key title matching the given keywords")
sub_parser.add_argument("keyword", nargs="+", help="a keyword")
sub_parser = subparsers.add_parser("del",
help="delete an existing key title matching the given keywords and the key " +
"content")
sub_parser.add_argument("keyword", nargs="+", help="a keyword")
sub_parser = subparsers.add_parser("import", help="import all key titles and contents from a text file")
sub_parser.add_argument("file", help="a text file containing key titles and contents to import")
sub_parser = subparsers.add_parser("export", help="export all key titles and contents to stdout or a file")
sub_parser.add_argument("file", nargs='?', help="a text file to export the key titles and contents")
subparsers.add_parser("reset", help="reset the master password")
# 'list' if no sub-command is given
if len(sys.argv) == 1:
sys.argv.append('list')
args = parser.parse_args()
if args.action == 'help':
parser.print_help()
sys.exit(0)
if args.database is None:
args.database = get_default_db_file() # type: str
else:
set_default_db_file(args.database)
keybox = KeyBox(args.database)
if args.action == 'list':
title_time_array = keybox.list()
if len(title_time_array) == 0:
sys.stdout.write("No item found\n")
else:
for title, mod_time in title_time_array:
print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(mod_time)) + " - " + title
sys.exit(0)
# check errors before init or verify master password
if args.action == 'add':
if keybox.exists(args.title):
exit_with_error("Error: '%s' exists, try to view it or add with another title" % args.title)
if args.action in ['view', 'mod', 'del']:
matches = keybox.search(args.keyword)
if len(matches) == 0:
exit_with_error(
"Error: no title matching the given keywords, try to list all titles or change to another title")
else:
sys.stdout.write("Found the following titles:\n")
for index, (title, mod_time) in enumerate(matches):
mod_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(mod_time))
print "[%d] %s - %s" % (index, mod_str, title)
index = 0
if len(matches) > 1:
index = -1
while index < 0 or index >= len(matches):
index = raw_input("Select: [0] ").strip()
if len(index) == 0:
index = 0
break
else:
try:
index = int(index)
except ValueError:
pass
args.title = matches[index][0]
elif args.action == "import":
if not os.path.exists(args.file):
exit_with_error("Error: file '%s' not found." % args.file)
elif args.action == "export":
fd = sys.stdout
if args.file is not None:
if os.path.exists(args.file):
exit_with_error("Error: file exists, please choose a different file to export")
else:
fd = open(args.file, 'w')
elif args.action == "reset":
if not keybox.exists(KeyBox.MASTER_KEY_TITLE):
exit_with_error("Error: master password is not set yet")
if not keybox.exists(KeyBox.MASTER_KEY_TITLE):
keybox.init_master_password()
else:
keybox.verify_master_password()
if args.action == 'add':
plain = input_content(args.title)
keybox.set(args.title, plain)
elif args.action == "view":
mod_time, plain = keybox.view(args.title)
mod_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(mod_time))
sys.stdout.write("---\nKEY: %s\nMOD: %s\n%s---\n" % (args.title, mod_str, plain))
elif args.action == "mod":
sys.stdout.write("---\n%s---\n" % keybox.view(args.title)[1])
plain = input_content(args.title)
keybox.set(args.title, plain)
elif args.action == "del":
mod_time, plain = keybox.view(args.title)
mod_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(mod_time))
sys.stdout.write("---\nKEY: %s:\nMOD: %s\n%s---\n" % (args.title, mod_str, plain))
confirm = raw_input("Confirm to delete key '%s' [yes/no]? " % args.title)
while confirm not in ['yes', 'no']:
confirm = raw_input("Confirm to delete key '%s' [yes/no]? " % args.title)
if confirm == 'yes':
keybox.delete(args.title)
sys.stdout.write("Deleted.\n")
elif args.action == "import":
for title, mod_time, content in read_keys(args.file):
if keybox.exists(title):
sys.stdout.write("skipped %s: exists in database\n" % title)
else:
keybox.set(title, content, mod_time=mod_time)
sys.stdout.write("imported %s\n" % title)
elif args.action == "export":
if fd == sys.stdout: fd.write("---\n")
for title, mod_time in keybox.list():
fd.write("KEY: %s\n" % title)
fd.write("MOD: %s\n" % mod_time)
fd.write("%s" % keybox.view(title)[1])
if fd == sys.stdout:
fd.write("---\n")
else:
fd.write("\n")
if fd != sys.stdout:
sys.stdout.write("Exported to file %s\n" % args.file)
elif args.action == "reset":
keybox.reset()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.stdout.write("\nUser aborted.\n")
| apache-2.0 | 3,663,396,818,684,847,600 | 38.75853 | 118 | 0.57057 | false | 3.79174 | false | false | false |
DigitalSlideArchive/large_image | large_image/constants.py | 1 | 1924 | #############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
class SourcePriority:
NAMED = 0 # Explicitly requested
PREFERRED = 1
HIGHER = 2
HIGH = 3
MEDIUM = 4
LOW = 5
LOWER = 6
FALLBACK = 7
MANUAL = 8 # Will never be selected automatically
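# Assumption worth stating: when a tile source is picked automatically, a lower
# SourcePriority value is meant to win (PREFERRED beats HIGH beats FALLBACK),
# with NAMED reserved for an explicitly requested source and MANUAL never
# auto-selected.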
TILE_FORMAT_IMAGE = 'image'
TILE_FORMAT_PIL = 'PIL'
TILE_FORMAT_NUMPY = 'numpy'
TileOutputMimeTypes = {
# JFIF forces conversion to JPEG through PIL to ensure the image is in a
# common colorspace. JPEG colorspace is complex: see
# https://docs.oracle.com/javase/8/docs/api/javax/imageio/metadata/
# doc-files/jpeg_metadata.html
'JFIF': 'image/jpeg',
'JPEG': 'image/jpeg',
'PNG': 'image/png',
'TIFF': 'image/tiff',
}
TileOutputPILFormat = {
'JFIF': 'JPEG'
}
TileInputUnits = {
None: 'base_pixels',
'base': 'base_pixels',
'base_pixel': 'base_pixels',
'base_pixels': 'base_pixels',
'pixel': 'mag_pixels',
'pixels': 'mag_pixels',
'mag_pixel': 'mag_pixels',
'mag_pixels': 'mag_pixels',
'magnification_pixel': 'mag_pixels',
'magnification_pixels': 'mag_pixels',
'mm': 'mm',
'millimeter': 'mm',
'millimeters': 'mm',
'fraction': 'fraction',
}
| apache-2.0 | -6,192,587,494,464,616,000 | 28.6 | 77 | 0.591476 | false | 3.562963 | false | false | false |
facebookresearch/ParlAI | parlai/tasks/taskntalk/agents.py | 1 | 3737 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.teachers import Teacher
from parlai.utils.io import PathManager
from .build import build
import json
import os
import random
def _path(opt, task_size='small'):
"""Return path to json file of dataset - it can be train/valid file
of small/large dataset. Validation data is used for test as well,
because labels are inferred from the image and task itself.
"""
dt = opt['datatype'].split(':')[0]
# ensure data is built
build(opt)
if dt == 'train':
file_name = 'train.json'
elif dt == 'valid' or dt == 'test':
file_name = 'valid.json'
else:
        raise RuntimeError('Invalid datatype: %s' % dt)
data_path = os.path.join(opt['datapath'], 'taskntalk', task_size, file_name)
return data_path
class AbstractTaskNTalk(Teacher):
"""
TaskNTalk basic teacher, it picks a random image and associates a random task with
it.
Metric updates and observation are to be implemented.
"""
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.id = 'taskntalk'
if not shared:
self._setup_data(self.opt['datafile'])
else:
self.data = shared['data']
self.task_defn = shared['task_defn']
self.task_index = shared['task_index']
def _setup_data(self, data_path):
"""
Read the json file and store images and task definitions.
"""
print('loading: ' + data_path)
with PathManager.open(data_path) as data_file:
json_data = json.load(data_file)
self.data = json_data['data']
self.task_defn = json_data['task_defn']
# images are [color, shape, style] lists (example: ['red', 'square', 'dotted'])
self.task_index = {'color': 0, 'shape': 1, 'style': 2}
random.shuffle(self.data)
def share(self):
"""
Share images and task definitions with other teachers.
"""
shared = super().share()
shared['data'] = self.data
shared['task_defn'] = self.task_defn
shared['task_index'] = self.task_index
return shared
def __len__(self):
return len(self.data)
def observe(self, observation):
"""
Process observation for metrics.
"""
self.observation = observation
# TODO(kd): update metrics
return observation
def act(self):
"""
Select random image and associate random task with it.
"""
image = random.choice(self.data)
task = random.choice(self.task_defn)
labels = [image[self.task_index[attr]] for attr in task]
action = {
'image': ' '.join(image),
'text': ' '.join(task),
'labels': [' '.join(labels)],
'episode_done': True,
}
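        # Illustrative example, reusing the sample image from _setup_data's comment:
        # image ['red', 'square', 'dotted'] with task ['color', 'shape'] produces
        # {'image': 'red square dotted', 'text': 'color shape',
        #  'labels': ['red square'], 'episode_done': True}.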
# TODO(kd): fetch all data for valid/test
return action
class SmallTeacher(AbstractTaskNTalk):
"""
Teacher for small dataset, invoked by ``taskntalk:small``.
"""
def __init__(self, opt, shared=None):
opt['datafile'] = _path(opt, 'small')
super().__init__(opt, shared)
class LargeTeacher(AbstractTaskNTalk):
"""
Teacher for large dataset, invoked by ``taskntalk:large``.
"""
def __init__(self, opt, shared=None):
opt['datafile'] = _path(opt, 'large')
super().__init__(opt, shared)
class DefaultTeacher(SmallTeacher):
"""
Default teacher for small dataset, invoked by ``taskntalk``.
"""
pass
| mit | 5,102,321,009,451,953,000 | 27.968992 | 87 | 0.587905 | false | 3.896767 | false | false | false |