Dataset schema (column : type : observed lengths/ranges/classes): repo_name : string : lengths 5-92 | path : string : lengths 4-221 | copies : string : 19 classes | size : string : lengths 4-6 | content : string : lengths 766-896k | license : string : 15 classes | hash : int64 : -9,223,277,421,539,062,000 to 9,223,102,107B | line_mean : float64 : 6.51-99.9 | line_max : int64 : 32-997 | alpha_frac : float64 : 0.25-0.96 | autogenerated : bool : 1 class | ratio : float64 : 1.5-13.6 | config_test : bool : 2 classes | has_no_keywords : bool : 2 classes | few_assignments : bool : 1 class
repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
pepincho/playground | python/Learn-Python-The-Hard-Way/exercises11to20.py | 1 | 2190 | # exercise 11
name = input("What's your name? ") # take the name from the keyboard
print ("Your name is {}".format(name))
# exercise 15
file_again = input("Type the filename again: > ") # read the file's name from the keyboard
txt_again = open(file_again) # open the file
print (txt_again.read()) # print the file's content
# exercise 16
print ("Opening the file...")
target_name = input("Type the filename: > ")
target_txt = open(target_name, "r+")
print ("Now I'm going to ask you for three lines.")
line1 = input("line 1: ")
line2 = input("line 2: ")
line3 = input("line 3: ")
print ("I'm going to write these to the file.")
target_txt.write(line1)
target_txt.write("\n")
target_txt.write(line2)
target_txt.write("\n")
target_txt.write(line3)
target_txt.write("\n")
print ("And finally, we close it.")
target_txt.close()
# exercise 17, read from a file and write to another file
from os.path import exists
from_file = input("From file: > ")
to_file = input("To file: > ")
in_file = open(from_file)
indata = in_file.read()
print ("The input file is {} bytes long.".format(len(indata)))
print ("Does the output file exist? {}".format(exists(to_file)))
out_file = open(to_file, 'w')
out_file.write(indata)
print ("Alright, all done.")
out_file.close()
in_file.close()
# exercise 18
def print_two(*args):
    arg1, arg2 = args
    print ("arg1: %r, arg2: %r" % (arg1, arg2))
def print_two_again(arg1, arg2):
    print ("arg1: %r, arg2: %r" % (arg1, arg2))
def print_none():
    print ("I got nothin'.")
print_two("Zed","Shaw")
print_two_again("Zed","Shaw")
print_none()
# exercise 20
def print_all(f):
    print (f.read())
def rewind(f):
    f.seek(0)
def print_a_line(line_count, f):
    print (line_count, f.readline())
file_name = input("File name: > ")
current_file = open(file_name)
print ("First let's print the whole file:\n")
print_all(current_file)
print ("Now let's rewind, kind of like a tape.")
print (rewind(current_file))
print ("Let's print three lines:")
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
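# Editor's illustrative addition (not part of the original exercise file): the
# exercise-17 copy, rewritten with "with" so both files are closed
# automatically even if an error occurs.
def copy_file(from_file, to_file):
    with open(from_file) as in_f, open(to_file, 'w') as out_f:
        out_f.write(in_f.read())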
| mit | -1,002,428,991,431,571,700 | 21.121212 | 90 | 0.67032 | false | 2.807692 | false | false | false |
bfontaine/Teebr | teebr/features.py | 1 | 6724 | # -*- coding: UTF-8 -*-
from __future__ import absolute_import, unicode_literals
import re
from json import dumps
from collections import defaultdict
from .log import mkLogger
from .text.utils import contains_emoji, extract_named_entities
from .text.utils import most_common_words
from .text.spam import is_spam
logger = mkLogger("features")
LANGUAGES = ('en',) # 'fr')
SOURCE_TYPES = {
"source_mobile": [
"Echofon",
"Mobile Web (M2)",
"Mobile Web (M5)",
"Mobile Web",
"Samsung Mobile",
"Twitter for Android",
"Twitter for BlackBerry®",
"Twitter for Windows Phone",
"Twitter for iPhone",
"Twitterrific",
"iOS",
"uberSocial for Android",
],
"source_tablet": [
"Twitter for Android Tablets",
"Twitter for iPad",
],
"source_desktop": [
"TweetDeck",
"Twitter Web Client",
"Twitter for Mac",
"OS X",
],
# automated publication tools + bot-like tweets
"source_autopub": [
"Buffer",
"Hootsuite",
"IFTTT",
"JustUnfollow",
"RoundTeam",
"TweetAdder v4",
"fllwrs",
"twittbot.net",
],
"source_social": [
"Ask.fm",
"Facebook",
"Foursquare",
"Instagram",
"LinkedIn",
"Path",
"Pinterest",
"Reddit RSS",
"Vine - Make a Scene",
"Vine for Android",
],
"source_news": [
"Nachrichten News",
],
"source_other": [],
}
URL_TYPES = {
"url_social": [
"fb.me",
"path.com",
],
"url_social_media": [
"vine.co",
"instagram.com",
],
"url_product": [
"amzn.to",
],
"url_video": [
"youtu.be",
],
}
# TODO we might be able to remove this now that we have a spam filter
APPS_BLACKLIST = set([
# followers spam
u"Unfollowers",
u"JustUnfollow",
u"fllwrs",
u"..ignite.v.1.",
u"Adi Sumardiyasa",
u"Who Unfollowed Me",
# tweets ranking
u"001FM Top40 Tweets",
# Games
u"1Dreamboy 2 Game",
u"1Dreamboy Version 2 Game",
u"Airport City Mobile",
u"The Tribez HD on iOS",
# General news
u"233Live Tweets",
u"247newz",
# Misc news
u"ADVFN News Alert",
u"APD Traffic Alerts",
# Buzzfeed-like
u"75325love",
u"AlltheGoss",
u"AllHealthSecrets.com",
u"Amusing information",
u"volkanc",
u"awe.sm",
# nsfw
u"definebabecom",
u"Cumagination Gay",
u"Cumagination Lesbian",
u"EscortGuidexXx",
u"TweetAdder v",
# Misc Spam
u";sdklafjas",
u"Acne-Treatments-and-Tips.com",
u"AmazonRecommend",
# Others
u"Adcourier",
])
# some apps add numbers at the end, e.g. MySpam, MySpam1, MySpam2, etc
END_DIGITS = re.compile(r"\s*\d+$")
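# Editor's note (illustrative example, not in the original source):
# END_DIGITS.sub("", "MySpam2") -> "MySpam", so a client that appends a
# counter to its name still matches its blacklist entry.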
entity_keys = ("urls", "hashtags", "user_mentions", "trends", "symbols", "media")
def filter_status(st):
    """
    Check if we should include a status as returned by the Streaming API in our
    DB. It'll return ``False`` if it should be rejected.
    """
    # keep only some languages
    if st.lang not in LANGUAGES:
        return False
    # remove replies
    if st.in_reply_to_screen_name:
        return False
    # remove RTs
    if getattr(st, 'retweeted_status', False):
        return False
    # remove suspicious apps
    if not st.source or not st.source_url:
        return False
    # remove spam apps
    if END_DIGITS.sub("", st.source) in APPS_BLACKLIST:
        return False
    # remove manual RTs
    if st.text.startswith("RT @") or st.text.startswith("MT @"):
        return False
    # remove manual responses
    if st.text.startswith(".@"):
        return False
    # remove other spam tweets
    if is_spam(st.text):
        return False
    # ok
    return True
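# Editor's illustrative sketch (not part of the original module): how
# filter_status() would typically gate incoming statuses before storage; the
# listener base class and the save_status() helper are assumptions.
#
#   class FilteringListener(StreamListener):
#       def on_status(self, status):
#           if filter_status(status):
#               save_status(status)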
class FeaturesDict(defaultdict):
    def __init__(self, st):
        super(FeaturesDict, self).__init__(float)
        self._st = st
    def compute_features(self):
        """
        Compute all features for this tweet
        """
        self._set_source_type()
        self._set_extra_entities()
        st = self._st
        self["sg_geolocalized"] = float(st.geo is not None)
        self["sg_lang_%s" % st.lang] = 1.0
        self["sg_contributors"] = float(st.contributors is not None)
        self["sg_emojis"] = contains_emoji(st.text)
        # some statuses don't have this attribute
        self["sg_nsfw"] = getattr(st, "possibly_sensitive", 0.0)
        entities = extract_named_entities(st.text)
        self["names"] = ",".join(entities)
        for entity in entities:
            entity = entity.lower()
            if entity in most_common_words:
                self["sg_mc_word_%s" % entity] = 1.0
        self["retweet_count"] = getattr(st, "retweet_count", 0.0)
        self["favorite_count"] = getattr(st, "favorite_count", 0.0)
        for key in entity_keys:
            # mark whether each entity type is present on this status
            self["sg_%s" % key] = int(bool(self._st.entities.get(key, [])))
    def _set_source_type(self):
        """
        Feature: source type
        Keys: source_mobile, source_desktop, source_autopub, source_social,
              source_tablet, source_other, ... (see SOURCE_TYPES)
        Values: [0, 1]
        """
        text = self._st.source.strip()
        for s,vs in SOURCE_TYPES.items():
            if text in vs:
                self["sg_%s" % s] = 1.0
                return
        ltext = text.lower()
        for brand in ("android", "iphone", "blackberry", "windows phone"):
            if ltext.endswith(" for %s" % brand):
                self["sg_source_mobile"] = 1.0
                return
        self["sg_source_others"] = 1.0
    def _set_extra_entities(self):
        extra = {}
        media = getattr(self._st, "entities", {}).get("media", [])
        if media:
            photos = []
            for m in media:
                # TODO check the format for videos
                if m.get("type") != "photo":
                    continue
                photos.append({
                    # The image URL
                    "media_url": m["media_url_https"],
                    # The URL included in the status (expanded by us)
                    "url": m["expanded_url"],
                })
            extra["photos"] = photos
        self["extra_entities"] = dumps(extra)
def compute_features(status):
    expand_urls(status)
    f = FeaturesDict(status)
    f.compute_features()
    return f
def expand_urls(st):
    entities = getattr(st, "entities", {})
    for link in entities.get("urls", []) + entities.get("media", []):
        st.text = st.text.replace(link["url"], link["expanded_url"])
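def _example_expand_urls():
    """Editor's illustrative sketch (not part of the original module): shows
    the effect of expand_urls() on a minimal status-like object."""
    class _Stub(object):
        text = "read this https://t.co/abc"
        entities = {"urls": [{"url": "https://t.co/abc",
                              "expanded_url": "https://example.com/article"}],
                    "media": []}
    st = _Stub()
    expand_urls(st)
    return st.text  # "read this https://example.com/article"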
| mit | -323,460,344,605,223,000 | 22.840426 | 81 | 0.542466 | false | 3.519895 | false | false | false |
jabez007/Training_Helpyr | Setup/__init__.py | 1 | 7646 | import re
import os
APP_PATH = os.path.join(*os.path.split(os.path.dirname(os.path.realpath(__file__)))[:-1])
import sys
if APP_PATH not in sys.path:
sys.path.append(APP_PATH)
import MyTrack
import PowerShell
import Phonebook
import Overlord
import Log
LOGGER = Log.MyLog(name=__name__)
# # # #
"""
Special setup for Care Everywhere 101 (fka CE-500)
"""
def ce500(instructor, trainees, code="CSCce500setup"):
"""
entry point for setting up CE 101 (FKA CE500)
:param instructor: <string> the cache environment for the Instructor
:param trainees: <string> the cache environments for the trainees
:param code: <string> the Overlord tag the needs to be ran in each environment to complete setup
:return: <bool> True if everything was successful
"""
gwn = None
instr = "".join([c for c in instructor if c.isdigit()])
trns = clean_caches(trainees)
if instr:
'''
if this is a fresh class setup, as in we are not just adding trainee environments to an existing class
'''
# pull out the last trainee environment and make it GWN
gwn = trns[-1:]
if gwen(gwn):
# then take that environment out of the list we'll set up later
trns = trns[:-1]
LOGGER.info("epic-trn%s set up as GWN environment" % gwn[0])
else:
# otherwise, skip the GWN setup and make this a normal environment
gwn = None
LOGGER.error("Galaxy Wide Network not set up")
setup_instructor(instr)
# Connect Interconnects to trainee environments
if not assign_interconnects("CE500", trns):
return False
# Update Training Phone Book with new environment assignments
if not update_phonebook(trns):
return False
# Restart the Training Phone Book so our changes take affect
if not PowerShell.restart_phonebook():
LOGGER.error("Error in restarting Training Phonebook Interconnect")
return False
# Run Cache setup script
if not setup_cache([instr]+trns, code):
return False
if gwn is not None:
setup_cache(gwn, code, "GWeN")
return True
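# Editor's illustrative sketch (not part of the original module): a typical
# call for a fresh class, with the instructor in epic-trn05 and three trainee
# environments; the environment names below are made up.
#
#   ce500("epic-trn05", "epic-trn11, epic-trn12, epic-trn13")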
def setup_instructor(instructor):
"""
runs the setup particular to the instructor environment
:param instructor: <string> the cache environment for the class instructor
:return: <bool> True is everything was successful
"""
# Connect Interconnect to instructor environment
if not PowerShell.setup('01', instructor):
LOGGER.error("Failed to connect epic-trn%s to CE500 instructor Interconnect. See powershell.err" % instructor)
return False
# Save to tracking database
if not MyTrack.assign("Instructors", "train01", "epic-trn"+instructor):
LOGGER.error("Setup between CE500 instructor Interconnect and epic-trn%s not saved to database. See my_track.err"
% instructor)
# Reset TRN Phonebook and register Instructor environment
if not Phonebook.TrnPhonebook().instructor(instructor):
LOGGER.error("Error in registering epic-trn%s as the Instructor environment in the Training Phonebook. See TRNphonebook.err"
% instructor)
return False
LOGGER.info("epic-trn%s set up as instructor environment" % instructor)
return True
def update_phonebook(trainees):
    """
    updates the training Phonebook with trainee environments for this class
    :param trainees: <list(string)> the cache environments for the trainees
    :return: <bool> True if everything was successful
    """
    for cache in trainees:
        if not Phonebook.TrnPhonebook().register(cache):
            LOGGER.error("Error in registering epic-trn%s with Training Phonebook. See TRNphonebook.err" % cache)
            return False
    LOGGER.info("Trainee environments registered in phonebook")
    return True
def gwen(trainee):
"""
runs the setup particular to the Galaxy Wide Network environment
:param trainee: <string> the cache environment for GWN
:return: <bool> True if everything was successful
"""
# assign interconnect - this should be the same as the other trainee environments
assign_interconnects("CE500", trainee)
# update Phonebook
if not Phonebook.TrnPhonebook().register_gwn(trainee[0]):
return False
# setup cache for GWN with the other environments
return True
# # # #
"""
Generic Care Everywhere setup for IP and AMB Funds classes
"""
def funds(caches, code="CSCInpFunds"):
    """
    :param caches: <string>
    :param code: <string>
    :return: <bool>
    """
    trns = clean_caches(caches)
    if not assign_interconnects("AMB_IP", trns):
        return False
    if code:
        if not setup_cache(trns, code):
            return False
    return True
# # # #
"""
used by both Care Everywhere 101 and IP/AMB Funds
"""
def clean_caches(caches):
    """
    uses regex to parse out our cache environments passed in
    :param caches: <string>
    :return: <list(string)>
    """
    return_caches = list()
    data = re.finditer("([a-zA-Z0-9\-]+)", caches)
    for d in data:
        cache = "".join([s for s in d.group(1) if s.isdigit()])
        # make sure we have an environment and that it's not already assigned
        if cache and not MyTrack.check_assigned(cache):
            return_caches.append(cache)
    return return_caches
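# Editor's note (illustrative, not in the original source): clean_caches()
# keeps only the digits of each token and drops environments MyTrack reports
# as already assigned, e.g. clean_caches("trn-01, trn02 trn3") -> ['01', '02', '3']
# when none of them are currently assigned.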
def assign_interconnects(_class, trns):
assigned_interconnects = 1 # CE500 instructor always gets Interconnect 1
clss = _class
for cache in trns:
# #
if ("CE500" in _class) and (assigned_interconnects >= 40): # if training overbooks us, steal from FUNDs
clss = "AMB_IP"
interconnect = "".join([s for s in MyTrack.get("unassigned", "AMB_IP") if s.isdigit()])
else:
interconnect = "".join([s for s in MyTrack.get("unassigned", _class) if s.isdigit()])
# #
if interconnect:
if not PowerShell.setup(interconnect, cache):
LOGGER.error("Powershell failed to connect epic-trn%s to train%s" % (cache, interconnect))
return False
assigned_interconnects += 1
if not MyTrack.assign(clss, "train"+interconnect, "epic-trn"+cache):
LOGGER.error("Setup between epic-trn%s and train%s not saved to MyTrack" % (cache, interconnect))
return False
else:
LOGGER.error("No Interconnect returned from MyTrack for epic-trn%s" % cache)
return False
LOGGER.info("epic-trn%s connected to Interconnect-train%s" % (cache, interconnect))
return True
def setup_cache(trns, code, flag=""):
    success = True
    for trn in trns:
        if not Overlord.overlord(trn, code, flag):
            LOGGER.error("Error running %s. See Overlord logs" % code)
            success = False
    # LOGGER.info("%s successfully ran in %s" % (code, ", ".join(trns)))
    return success
# # # #
if __name__ == "__main__":
import datetime
import Outlook
for days in range(2): # setup today's and tomorrow's classes
tomorrow = (datetime.datetime.now() + datetime.timedelta(days=days)).strftime("%m/%d/%Y") # MM/DD/YYYY
print("Setting up classes for %s:" % tomorrow)
classes = MyTrack.setup_schedule(tomorrow)
for new_class in classes:
if funds(new_class[0]):
print("\t%s - email to %s" % (new_class[0], new_class[1]))
Outlook.send_email(e_address=new_class[1], env=new_class[0])
else:
print("\t%s failed" % new_class[0])
| mit | -4,601,870,185,056,420,400 | 31.675214 | 132 | 0.639681 | false | 3.836427 | false | false | false |
aricaldeira/PySPED | pysped/cte/webservices_flags.py | 1 | 2161 | # -*- coding: utf-8 -*-
#
# PySPED - Python libraries to deal with Brazil's SPED Project
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation, either version 2.1 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# PySPED - Bibliotecas Python para o
# SPED - Sistema Público de Escrituração Digital
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br>
#
# Este programa é um software livre: você pode redistribuir e/ou modificar
# este programa sob os termos da licença GNU Library General Public License,
# publicada pela Free Software Foundation, em sua versão 2.1 ou, de acordo
# com sua opção, qualquer versão posterior.
#
# Este programa é distribuido na esperança de que venha a ser útil,
# porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de
# COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a
# GNU Library General Public License para mais detalhes.
#
# Você deve ter recebido uma cópia da GNU Library General Public License
# juntamente com este programa. Caso esse não seja o caso, acesse:
# <http://www.gnu.org/licenses/>
#
from __future__ import division, print_function, unicode_literals
from ..nfe.webservices_flags import UF_CODIGO, CODIGO_UF
WS_CTE_AUTORIZACAO = 0
WS_CTE_CONSULTA_AUTORIZACAO = 1
WS_CTE_INUTILIZACAO = 1
WS_CTE_CONSULTA = 3
WS_CTE_SITUACAO = 4
WS_CTE_RECEPCAO_EVENTO = 2
WS_CTE_RECEPCAO_OS = 5
WS_CTE_DISTRIBUICAO = 6
CTE_AMBIENTE_PRODUCAO = 1
CTE_AMBIENTE_HOMOLOGACAO = 2
| lgpl-2.1 | -2,877,343,729,118,594,600 | 36.54386 | 76 | 0.757944 | false | 2.801047 | false | false | false |
sunoru/pokemon_only | stall/migrations/0001_initial.py | 1 | 6359 | # Generated by Django 2.2.2 on 2019-06-04 21:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pmo', models.CharField(choices=[('unknown', 'unknown'), ('pmo2015', 'pmo2015'), ('pmo2016', 'pmo2016'), ('pmo2017', 'pmo2017'), ('pmo2018', 'pmo2018'), ('pmo2019', 'pmo2019')], default='unknown', help_text='漫展', max_length=10)),
('validated', models.BooleanField(default=False)),
('name', models.CharField(default='未命名', max_length=50)),
('item_type', models.CharField(default='', help_text='种类', max_length=20)),
('content', models.CharField(default='', help_text='内容', max_length=100)),
('price', models.FloatField(default=0, help_text='价格')),
('url', models.URLField(default='', help_text='链接')),
('authors', models.TextField(default='', help_text='作者名单')),
('introduction', models.TextField(default='', help_text='简介')),
('cover_image', models.ImageField(help_text='封面图片', max_length=1024, null=True, upload_to='items/%Y/%m/%d')),
('forto', models.CharField(default='', help_text='面向人群', max_length=20)),
('is_restricted', models.CharField(default='', help_text='限制级是否', max_length=20)),
('circle', models.CharField(default='', help_text='出品社团', max_length=40)),
('is_started_with', models.BooleanField(default=False, help_text='是否首发')),
('item_order', models.IntegerField(default=0, help_text='商品排序')),
],
options={
'ordering': ['seller'],
},
),
migrations.CreateModel(
name='Option',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(max_length=255, unique=True)),
('value', models.TextField(default='')),
],
),
migrations.CreateModel(
name='Seller',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pmo', models.CharField(choices=[('unknown', 'unknown'), ('pmo2015', 'pmo2015'), ('pmo2016', 'pmo2016'), ('pmo2017', 'pmo2017'), ('pmo2018', 'pmo2018'), ('pmo2019', 'pmo2019')], default='unknown', help_text='漫展', max_length=10)),
('email', models.EmailField(max_length=30, verbose_name='email address')),
('is_active', models.BooleanField(default=False, help_text='是否激活')),
('signup_datetime', models.DateTimeField(auto_now=True)),
('signup_address', models.GenericIPAddressField()),
('is_stall', models.BooleanField(help_text='是否摊位')),
('circle_name', models.CharField(help_text='社团名', max_length=40)),
('circle_description', models.TextField(help_text='社团介绍')),
('circle_image', models.ImageField(help_text='社团图标', upload_to='circle/%Y/%m/%d')),
('seller_id', models.CharField(default='', help_text='摊位号', max_length=10)),
('proposer_name', models.CharField(help_text='申请人姓名', max_length=20)),
('proposer_sex', models.CharField(help_text='性别', max_length=20)),
('proposer_qq', models.CharField(help_text='QQ', max_length=11)),
('proposer_phone', models.CharField(help_text='电话', max_length=20)),
('proposer_id', models.CharField(help_text='身份证号', max_length=18)),
('booth', models.FloatField(default=1, help_text='申请摊位数')),
('number_of_people', models.SmallIntegerField(default=1, help_text='申请人数')),
('remarks', models.TextField(default='', help_text='备注')),
('status', models.IntegerField(help_text='状态')),
('notice', models.TextField(default='', help_text='通知')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ValidateCode',
fields=[
('pmo', models.CharField(choices=[('unknown', 'unknown'), ('pmo2015', 'pmo2015'), ('pmo2016', 'pmo2016'), ('pmo2017', 'pmo2017'), ('pmo2018', 'pmo2018'), ('pmo2019', 'pmo2019')], default='unknown', help_text='漫展', max_length=10)),
('code', models.CharField(max_length=20, primary_key=True, serialize=False)),
('validated', models.BooleanField(default=False)),
('seller', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stall.Seller')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ItemPicture',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pmo', models.CharField(choices=[('unknown', 'unknown'), ('pmo2015', 'pmo2015'), ('pmo2016', 'pmo2016'), ('pmo2017', 'pmo2017'), ('pmo2018', 'pmo2018'), ('pmo2019', 'pmo2019')], default='unknown', help_text='漫展', max_length=10)),
('picture', models.ImageField(help_text='图片', max_length=1024, upload_to='items/%Y/%m/%d')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stall.Item')),
],
),
migrations.AddField(
model_name='item',
name='seller',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stall.Seller'),
),
]
| gpl-2.0 | -6,446,297,086,749,532,000 | 59.343137 | 246 | 0.560845 | false | 3.61421 | false | false | false |
meraki-analytics/cassiopeia | cassiopeia/cassiopeia.py | 1 | 6069 | from typing import List, Set, Dict, Union, TextIO
import arrow
import datetime
from .data import Region, Queue, Season, Tier, Division, Position
from .core import Champion, Summoner, ChampionMastery, Rune, Item, Match, Map, SummonerSpell, Realms, ProfileIcon, LanguageStrings, CurrentMatch, ShardStatus, Versions, MatchHistory, Champions, ChampionMasteries, Runes, Items, SummonerSpells, Maps, FeaturedMatches, Locales, ProfileIcons, ChallengerLeague, GrandmasterLeague, MasterLeague, League, LeagueSummonerEntries, LeagueEntries, Patch, VerificationString, ChampionRotation
from .datastores import common as _common_datastore
from ._configuration import Settings, load_config, get_default_config
from . import configuration
# Settings endpoints
def apply_settings(config: Union[str, TextIO, Dict, Settings]):
    if not isinstance(config, (Dict, Settings)):
        config = load_config(config)
    if not isinstance(config, Settings):
        settings = Settings(config)
    else:
        settings = config
    # Load any plugins after everything else has finished importing
    import importlib
    for plugin in settings.plugins:
        imported_plugin = importlib.import_module("cassiopeia.plugins.{plugin}.monkeypatch".format(plugin=plugin))
    print_calls(settings._Settings__default_print_calls, settings._Settings__default_print_riot_api_key)
    # Overwrite the old settings
    configuration._settings = settings
    # Initialize the pipeline immediately
    _ = configuration.settings.pipeline
def set_riot_api_key(key: str):
configuration.settings.set_riot_api_key(key)
def set_default_region(region: Union[Region, str]):
configuration.settings.set_region(region)
def print_calls(calls: bool, api_key: bool = False):
_common_datastore._print_calls = calls
_common_datastore._print_api_key = api_key
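# Editor's illustrative sketch (not part of the original module): typical
# startup wiring; the API key, region and summoner name are placeholders.
#
#   import cassiopeia as cass
#   cass.set_riot_api_key("RGAPI-xxxxxxxx")
#   cass.set_default_region("NA")
#   summoner = cass.get_summoner(name="Kalturi", region="NA")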
# Data endpoints
def get_league_entries(summoner: Summoner) -> LeagueEntries:
return summoner.league_entries
def get_paginated_league_entries(queue: Queue, tier: Tier, division: Division, region: Union[Region, str] = None) -> LeagueEntries:
return LeagueEntries(region=region, queue=queue, tier=tier, division=division)
def get_master_league(queue: Union[Queue, int, str], region: Union[Region, str] = None) -> MasterLeague:
return MasterLeague(queue=queue, region=region)
def get_grandmaster_league(queue: Union[Queue, int, str], region: Union[Region, str] = None) -> GrandmasterLeague:
return GrandmasterLeague(queue=queue, region=region)
def get_challenger_league(queue: Union[Queue, int, str], region: Union[Region, str] = None) -> ChallengerLeague:
return ChallengerLeague(queue=queue, region=region)
def get_match_history(summoner: Summoner, begin_index: int = None, end_index: int = None, begin_time: arrow.Arrow = None, end_time: arrow.Arrow = None, queues: Set[Queue] = None, seasons: Set[Season] = None, champions: Set[Champion] = None):
return MatchHistory(summoner=summoner, begin_index=begin_index, end_index=end_index, begin_time=begin_time, end_time=end_time, queues=queues, seasons=seasons, champions=champions)
def get_match(id : int, region: Union[Region, str] = None) -> Match:
return Match(id=id, region=region)
def get_featured_matches(region: Union[Region, str] = None) -> FeaturedMatches:
return FeaturedMatches(region=region)
def get_current_match(summoner: Summoner, region: Union[Region, str] = None) -> CurrentMatch:
return CurrentMatch(summoner=summoner, region=region)
def get_champion_masteries(summoner: Summoner, region: Union[Region, str] = None) -> ChampionMasteries:
return ChampionMasteries(summoner=summoner, region=region)
def get_champion_mastery(summoner: Summoner, champion: Union[Champion, int, str], region: Union[Region, str] = None) -> ChampionMastery:
return ChampionMastery(champion=champion, summoner=summoner, region=region)
def get_summoner(*, id: str = None, account_id: str = None, name: str = None, region: Union[Region, str] = None) -> Summoner:
return Summoner(id=id, account_id=account_id, name=name, region=region)
def get_champion(key: Union[str, int], region: Union[Region, str] = None) -> Champion:
return get_champions(region=region)[key]
def get_champions(region: Union[Region, str] = None) -> Champions:
return Champions(region=region)
def get_runes(region: Union[Region, str] = None) -> Runes:
return Runes(region=region)
def get_summoner_spells(region: Union[Region, str] = None) -> SummonerSpells:
return SummonerSpells(region=region)
def get_items(region: Union[Region, str] = None) -> Items:
return Items(region=region)
def get_maps(region: Union[Region, str] = None) -> Maps:
return Maps(region=region)
def get_profile_icons(region: Union[Region, str] = None) -> ProfileIcons:
return ProfileIcons(region=region)
def get_realms(region: Union[Region, str] = None) -> Realms:
return Realms(region=region)
def get_status(region: Union[Region, str] = None) -> ShardStatus:
return ShardStatus(region=region)
def get_language_strings(region: Union[Region, str] = None) -> LanguageStrings:
return LanguageStrings(region=region)
def get_locales(region: Union[Region, str] = None) -> List[str]:
return Locales(region=region)
def get_versions(region: Union[Region, str] = None) -> List[str]:
return Versions(region=region)
def get_version(date: datetime.date = None, region: Union[Region, str] = None) -> Union[None, str]:
versions = get_versions(region)
if date is None:
return versions[0]
else:
patch = Patch.from_date(date, region=region)
for version in versions:
if patch.majorminor in version:
return version
return None
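# Editor's illustrative sketch (not part of the original module): resolving the
# patch version that was live on a past date; the date is a placeholder.
#
#   import datetime
#   v = get_version(date=datetime.date(2019, 3, 1), region="NA")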
def get_verification_string(summoner: Summoner) -> VerificationString:
return VerificationString(summoner=summoner)
def get_champion_rotations(region: Union[Region, str] = None) -> ChampionRotation:
return ChampionRotation(region=region)
# Pipeline
def _get_pipeline():
return configuration.settings.pipeline
| mit | 2,522,235,741,749,909,500 | 35.341317 | 429 | 0.731257 | false | 3.276998 | true | false | false |
sk413025/tilitools | latentsvdd.py | 1 | 3222 | from cvxopt import matrix,spmatrix,sparse,uniform,normal,setseed
from cvxopt.blas import dot,dotu
from cvxopt.solvers import qp
from cvxopt.lapack import syev
import numpy as np
import math as math
from kernel import Kernel
from svdd import SVDD
from ocsvm import OCSVM
import pylab as pl
import matplotlib.pyplot as plt
class LatentSVDD:
    """ Latent variable support vector data description.
        Written by Nico Goernitz, TU Berlin, 2014
        For more information see:
        'Learning and Evaluation with non-i.i.d Label Noise'
        Goernitz et al., AISTATS & JMLR W&CP, 2014
    """
    PRECISION = 10**-3 # important: effects the threshold, support vectors and speed!
    C = 1.0 # (scalar) the regularization constant > 0
    sobj = [] # structured object contains various functions
              # i.e. get_num_dims(), get_num_samples(), get_sample(i), argmin(sol,i)
    sol = [] # (vector) solution vector (after training, of course)
    def __init__(self, sobj, C=1.0):
        self.C = C
        self.sobj = sobj
    def train_dc(self, max_iter=50):
        """ Solve the LatentSVDD optimization problem with a
            sequential convex programming/DC-programming
            approach:
            Iteratively, find the most likely configuration of
            the latent variables and then, optimize for the
            model parameter using fixed latent states.
        """
        N = self.sobj.get_num_samples()
        DIMS = self.sobj.get_num_dims()
        # intermediate solutions
        # latent variables
        latent = [0]*N
        sol = 10.0*normal(DIMS,1)
        psi = matrix(0.0, (DIMS,N)) # (dim x exm)
        old_psi = matrix(0.0, (DIMS,N)) # (dim x exm)
        threshold = 0
        obj = -1
        iter = 0
        # terminate if objective function value doesn't change much
        while iter<max_iter and (iter<2 or sum(sum(abs(np.array(psi-old_psi))))>=0.001):
            print('Starting iteration {0}.'.format(iter))
            print(sum(sum(abs(np.array(psi-old_psi)))))
            iter += 1
            old_psi = matrix(psi)
            # 1. linearize
            # for the current solution compute the
            # most likely latent variable configuration
            for i in range(N):
                # min_z ||sol - Psi(x,z)||^2 = ||sol||^2 + min_z -2<sol,Psi(x,z)> + ||Psi(x,z)||^2
                # Hence => ||sol||^2 - max_z 2<sol,Psi(x,z)> - ||Psi(x,z)||^2
                (foo, latent[i], psi[:,i]) = self.sobj.argmax(sol, i, opt_type='quadratic')
            # 2. solve the intermediate convex optimization problem
            kernel = Kernel.get_kernel(psi,psi)
            svdd = SVDD(kernel, self.C)
            svdd.train_dual()
            threshold = svdd.get_threshold()
            inds = svdd.get_support_dual()
            alphas = svdd.get_support_dual_values()
            sol = psi[:,inds]*alphas
        self.sol = sol
        self.latent = latent
        return (sol, latent, threshold)
    def apply(self, pred_sobj):
        """ Application of the LatentSVDD:
            anomaly_score = min_z ||c*-\Psi(x,z)||^2
            latent_state = argmin_z ||c*-\Psi(x,z)||^2
        """
        N = pred_sobj.get_num_samples()
        norm2 = self.sol.trans()*self.sol
        vals = matrix(0.0, (1,N))
        lats = matrix(0.0, (1,N))
        for i in range(N):
            # min_z ||sol - Psi(x,z)||^2 = ||sol||^2 + min_z -2<sol,Psi(x,z)> + ||Psi(x,z)||^2
            # Hence => ||sol||^2 - max_z 2<sol,Psi(x,z)> - ||Psi(x,z)||^2
            (max_obj, lats[i], foo) = pred_sobj.argmax(self.sol, i, opt_type='quadratic')
            vals[i] = norm2 - max_obj
        return (vals, lats)
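# Editor's illustrative sketch (not part of the original module): the expected
# calling sequence, assuming `sobj` and `test_sobj` implement the
# get_num_samples(), get_num_dims() and argmax() interface used above.
#
#   lsvdd = LatentSVDD(sobj, C=1.0)
#   (sol, latent, threshold) = lsvdd.train_dc(max_iter=50)
#   (scores, states) = lsvdd.apply(test_sobj)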
| mit | -4,128,180,928,146,153,500 | 29.396226 | 86 | 0.646182 | false | 2.685 | false | false | false |
sniemi/SamPy | sandbox/src1/pviewer/pviewer.py | 1 | 31336 | #!/usr/bin/env python
from tkFileDialog import *
from Tkinter import *
from tkSimpleDialog import Dialog
import tkMessageBox
from plotAscii import *
from imageUtil import *
from view2d import *
from mdaAscii import *
import Pmw
import os, string
import AppShell
global Scan
global SH # SHARED
class setupPrinter(Dialog):
"Dialog for setting up printer "
def body(self,master):
self.title("Set Printer Dialog")
Label(master, text='Enter Printer Name:').grid(row=1, sticky=W)
self.label = StringVar()
self.label = Entry(master, width = 26 )
self.label.grid(row=1,column=1)
self.label.insert(0,SH['printer'])
return self.label
def apply(self):
SH['printer'] = self.label.get()
writeSH(SH)
class commandSyntax(Dialog):
"Dialog for sending a system command or any executable client"
def body(self,master):
self.title("Command Dialog")
self.commandsyntax = Pmw.EntryField(master, labelpos='w',
label_text='Enter Command:', value='',
command=self.valuechanged)
self.commandsyntax.pack(fill='x')
self.commandsyntax.component('entry').focus_set()
def valuechanged(self):
os.system(self.commandsyntax.get()+ ' &')
def apply(self):
self.destroy()
class pickDIdialog(Dialog):
"Dialog for selecting a text line which contains DI names to be used in multiline plot. If blank comment line picked, sequence number is used."
def body(self,master):
file = Scan['txtfile']
data = readArray(file)
nc = len(data[0])
self.nc = nc
fo = open(file,'r')
lines = fo.read()
fo.close()
lines = string.split(lines,'\n')
self.title("Pick Line where DI Names Resides")
box = Pmw.ScrolledListBox(master,
items=(lines),
labelpos=NW,label_font=SH['font'],
label_text='Extract column legends from the text window\nSelect the text line which contains\nlegends to be extracted for multi-line plot',
selectioncommand=self.selectionCommand,
dblclickcommand=self.selectionCommand,
usehullsize=1,hull_width=700,hull_height=400)
box.pack()
self.box = box
def selectionCommand(self):
box = self.box
sels = box.getcurselection()
sels = string.split(sels[0])
no = len(sels)
dc = no - self.nc
if dc >= 0:
sels = sels[dc:no]
ix = SH['ix']
sel = sels[ix+1:no]
else:
sel = range(self.nc)
V = []
for i in range(85):
V.append('')
for i in range(len(sel)):
V[i] = sel[i]
fo = open('pvs','w')
fo.write(str(V))
fo.close()
Scan['nc'] = len(V)
namedialog = GetLegends(self)
def apply(self):
self.destroy()
class GetXYVdialog(Dialog):
"Dialog to set column or line # of X, Y, DATA array located in the opend ascii 2D image file (generated by scanSee/catcher/yviewer)"
def body(self,master):
try:
font=SH['font'] #'Verdana 10 bold'
self.title("Extract X,Y,DATA array from scanSee ASCII file")
self.ix = [IntVar(),IntVar(),IntVar(),IntVar()]
Label(master,text='X and Data column #:',font=font).grid(row=0,column=0,sticky=W)
Label(master,text='X Vector Column #').grid(row=1,column=1,sticky=W)
Label(master,text='Data Start Column #').grid(row=2,column=1,sticky=W)
Label(master,text='Y Vector Defined in:',font=font).grid(row=3,column=0,sticky=W)
Label(master,text='Y Vector Line #').grid(row=4,column=1,sticky=W)
Label(master,text='Y Start Column #').grid(row=5,column=1,sticky=W)
Entry(master,width=4,textvariable=self.ix[0]).grid(row=1,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[1]).grid(row=2,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[2]).grid(row=4,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[3]).grid(row=5,column=2,sticky=W)
self.ix[0].set(0)
self.ix[1].set(2)
self.ix[2].set(3)
self.ix[3].set(2)
except AttributeError:
return self.ix[0]
def get(self):
return [self.ix[0].get(),self.ix[1].get(),self.ix[2].get(),self.ix[3].get()]
def apply(self):
ix = self.get()
Scan['rowcol'] = ix
file = Scan['txtfile']
if file != '':
data = readArray(file)
nc = len(data)
nr = len(data[0])
data = rowreverse(data)
x = data[ix[0]]
data = data[ix[1]:nr]
data = array(data)
fo = open(file,'r')
lines = fo.read()
fo.close
lines = string.split(lines,'\n')
if ix[2] >= 0:
py = lines[ix[2]]
py = string.split(py)
y = py[ix[3]:len(py)]
for i in range(len(y)):
y[i] = string.atof(y[i])
else:
y = range(len(data))
Scan['X'] = x
Scan['Y'] = y
file = Scan['txtfile']
if Scan['updown']:
plot2dUpdown(data,x,y,title=file)
else:
plot2d(data,x,y,title=file)
class defineXYdialog(Dialog):
"Dialog for entering Xmin,Xmax,Ymin,Ymax ranges"
def body(self,master):
try:
file = Scan['txtfile']
data = readArray(file)
data = rowreverse(data)
data = array(data)
nc = data.shape[1]
nr = data.shape[0]
Scan['im'] = data
font=SH['font'] #'Verdana 10 bold'
self.title("Set X, Y Ranges for Image Plot")
self.ix = [StringVar(),StringVar(),StringVar(),StringVar()]
Label(master,text='Enter X Plot Range',font=font).grid(row=0,column=0,sticky=W)
Label(master,text='Xmin').grid(row=1,column=1,sticky=W)
Label(master,text='Xmax').grid(row=2,column=1,sticky=W)
Label(master,text='Enter Y Plot Range',font=font).grid(row=3,column=0,sticky=W)
Label(master,text='Ymin').grid(row=4,column=1,sticky=W)
Label(master,text='Ymax').grid(row=5,column=1,sticky=W)
Entry(master,width=14,textvariable=self.ix[0]).grid(row=1,column=2,sticky=W)
Entry(master,width=14,textvariable=self.ix[1]).grid(row=2,column=2,sticky=W)
Entry(master,width=14,textvariable=self.ix[2]).grid(row=4,column=2,sticky=W)
Entry(master,width=14,textvariable=self.ix[3]).grid(row=5,column=2,sticky=W)
self.ix[0].set(1.)
self.ix[1].set(float(nc))
self.ix[2].set(1.)
self.ix[3].set(float(nr))
except AttributeError:
return self.ix[0]
def get(self):
return [self.ix[0].get(),self.ix[1].get(),self.ix[2].get(),self.ix[3].get()]
def apply(self):
ix = self.get()
ix = [string.atof(ix[0]),string.atof(ix[1]),string.atof(ix[2]),
string.atof(ix[3])]
data = Scan['im']
nr = data.shape[0]
nc = data.shape[1]
x = []
dx = (ix[1]-ix[0])/(nc-1)
for i in range(nc):
x.append(ix[0]+dx*i)
y = []
dy = (ix[3]-ix[2])/(nr-1)
for i in range(nr):
y.append(ix[2]+dy*i)
if Scan['updown']:
plot2dUpdown(data,x,y,title=Scan['txtfile'])
else:
plot2d(data,x,y,title=Scan['txtfile'])
class GetXYdialog(Dialog):
"Dialog for define X,Y vector line and column #"
def body(self,master):
try:
font=SH['font'] #'Verdana 10 bold'
self.title("Get X, Y Vectors from ASCII file")
self.ix = [IntVar(),IntVar(),IntVar(),IntVar()]
Label(master,text='X Vector Defined in:',font=font).grid(row=0,column=0,sticky=W)
Label(master,text='Line #').grid(row=1,column=1,sticky=W)
Label(master,text='Start Column #').grid(row=2,column=1,sticky=W)
Label(master,text='Y Vector Defined in:',font=font).grid(row=3,column=0,sticky=W)
Label(master,text='Line #').grid(row=4,column=1,sticky=W)
Label(master,text='Start Column #').grid(row=5,column=1,sticky=W)
Entry(master,width=4,textvariable=self.ix[0]).grid(row=1,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[1]).grid(row=2,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[2]).grid(row=4,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[3]).grid(row=5,column=2,sticky=W)
# cl = Scan['rowcol']
cl = [3,2,4,2]
self.ix[0].set(cl[0])
self.ix[1].set(cl[1])
self.ix[2].set(cl[2])
self.ix[3].set(cl[3])
except AttributeError:
return self.ix[0]
def get(self):
return [self.ix[0].get(),self.ix[1].get(),self.ix[2].get(),self.ix[3].get()]
def apply(self):
ix = self.get()
Scan['rowcol'] = ix
file = Scan['txtfile']
if file != '':
fo = open(file,'r')
lines = fo.read()
fo.close
lines = string.split(lines,'\n')
px = lines[ix[0]]
px = string.split(px)
x = px[ix[1]:len(px)]
for i in range(len(x)):
x[i] = string.atof(x[i])
py = lines[ix[2]]
py = string.split(py)
y = py[ix[3]:len(py)]
for i in range(len(y)):
y[i] = string.atof(y[i])
Scan['X'] = x
Scan['Y'] = y
file = Scan['txtfile']
data = readArray(file)
data = rowreverse(data)
data = array(data)
if Scan['updown']:
plot2dUpdown(data,x,y,title=file)
else:
plot2d(data,x,y,title=file)
class GetXdialog(Dialog):
"Dialog for defining X column # in text file"
def body(self,master):
font=SH['font'] #'Verdana 10 bold'
self.title("1D Multi-Line Plot")
self.ix = IntVar()
Label(master,text='Defined valid X column # from text file:',font=font).pack(anchor=NW)
Label(master,text=Scan['txtfile'],font=font).pack(anchor=NW)
Label(master,text='-1 - No X column defined ').pack(anchor=NW)
Label(master,text=' 0 - X defined at First column').pack(anchor=NW)
Label(master,text=' 1 - X defined at Second column').pack(anchor=NW)
Label(master,text='Enter X Column Index #:',font=font).pack(side=LEFT)
self.ix = Entry(master, width = 4)
self.ix.pack(side=LEFT)
v = self.get()
self.ix.insert(0,v)
return self.ix
def get(self):
# fo.close()
SH = readSH()
ix = SH['ix']
return ix
def apply(self):
ix = self.ix.get()
SH['ix'] = string.atoi(ix)
writeSH(SH)
os.system('plotAscii.py '+Scan['txtfile']+' '+str(ix) +' &')
class pick2Ddetector(Dialog):
"Dialog to pick any detector from the MDA 2D detector list and plot the selected 2D detector image"
def body(self,master):
self.title("Select 2D Detector")
box = Pmw.ScrolledListBox(master,
items=('1','2','3','4'),
labelpos=NW,label_text='Pick Detector',
selectioncommand=self.selectionCommand,
dblclickcommand=self.selectionCommand,
usehullsize=1,hull_width=200,hull_height=200)
box.pack()
self.box = box
def selectionCommand(self):
box = self.box
sels = box.getcurselection()
sels = string.split(sels[0])
sel = string.atoi(sels[0])
Scan['2d'] = sel
d = Scan['data']
pick2d(d,sel,updown=Scan['updown'])
def apply(self):
self.destroy()
class pviewer(AppShell.AppShell):
usecommandarea=1
balloonhelp=1
appversion = '1.0'
appname = 'pviewer'
copyright = 'Copyright ANL-APS-AOD-BCDA. All Rights Reserved'
contactname = 'Ben-chin K Cha'
contactphone = '(630) 252-8653'
contactemail = '[email protected]'
frameWidth = 800
frameHeight = 500
def unimplemented(self):
pass
def messageMDA(self):
box = Pmw.Dialog(self.interior(),
defaultbutton='OK',title='Info')
w = Label(box.interior(),
text='You need to use File->Open MDA...\n to load in an MDA file first',
padx=10,pady=10).pack()
box.activate()
def messageAscii(self):
box = Pmw.Dialog(self.interior(),
defaultbutton='OK',title='Info')
w = Label(box.interior(),
text='You need to use File->Open Ascii...\n to load in an ASCII file first',
padx=10,pady=10).pack()
box.activate()
def savepvs(self):
file = 'pvs'
V = self.apply()
fd = open(file,'w')
fd.write(str(V))
fd.close()
def createButtons(self):
self.buttonAdd('Exit',
helpMessage='Exit pviewer',
statusMessage='Exit pviewer',
command=self.closeup)
def startup(self):
if os.path.isfile('pviewer.config'):
lines = readST('pviewer.config')
self.mdapath = lines[0]
self.txtpath = lines[1]
print 'self.mdapath=', self.mdapath
print 'self.txtpath=', self.txtpath
else:
self.mdapath = os.curdir
self.txtpath = os.curdir
def closeup(self):
fo = open('pviewer.config','w')
st = [ self.mdapath,self.txtpath]
# print str(st)
fo.write(str(st))
fo.close()
self.quit()
# def addmenuBar(self):
# self.menuBar.addmenu('Setup','Fields for plot legend')
def addMoremenuBar(self):
self.menuBar.addmenuitem('File', 'command', 'Quit this application',
label='Quit',
command=self.closeup)
self.menuBar.addmenuitem('File', 'command', '', label='--------------')
self.menuBar.addmenuitem('File', 'command',
'Setup Printer ...',
label='Printer...',
command=self.printerDialog)
self.menuBar.addmenuitem('File', 'command', '', label='--------------')
self.menuBar.addmenuitem('File', 'command',
'File Selection dialog for Ascii File ...',
label='Open Ascii ...',
command=self.openAscii)
self.menuBar.addmenuitem('File', 'command', '', label='--------------')
self.menuBar.addmenuitem('File', 'command',
'File Selection dialog for MDA File ...',
label='Open MDA ...',
command=self.openMDA)
self.menuBar.addmenuitem('Help', 'command',
'Online help about this application ...',
label='pviewer_help.txt ...',
command=self.openHelpText)
self.menuBar.addmenuitem('Setup','command',
'Pick and load Color Table for 2D image plot ',
label='Color Table...',
command=self.setCTdialog)
self.menuBar.addmenuitem('Setup','command',
'Modify legend field names used in multiline plot',
label='Name Legends...',
command=self.legenddialog)
self.toggleUpdownVar=IntVar()
self.toggleUpdownVar.set(1)
self.menuBar.addmenuitem('Setup','checkbutton',
'Toggle plot2d updown mode',
label='Image Upside Down',
variable=self.toggleUpdownVar,
command=self.updownImage)
self.menuBar.addmenu('MDAView','Various MDAView features')
self.menuBar.addmenuitem('MDAView','command',
'Access 1D Array and pass to multiline plotter...',
label='Multi-line 1D Plot...',
command=self.mda1DRptPlot)
self.menuBar.addmenuitem('MDAView', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAView','command',
'Access panimage window',
label='PanImages...',
command=self.getpanimage)
self.menuBar.addmenuitem('MDAView','command',
'Display 2D image for the select detector',
label='Pick Di Image...',
command=self.get2Ddetector)
self.menuBar.addmenu('MDAReports','Various Report features')
self.menuBar.addmenuitem('MDAReports','command',
'Generate MDA 1D/2D reports',
label='MDA 1D/2D Reports...',
command=self.mdaReport)
self.menuBar.addmenuitem('MDAReports','command',
'Generate sequential MDA 1D report from 2D array',
label='MDA 2D->1D Report...',
command=self.mda2D1DRpt)
self.menuBar.addmenuitem('MDAReports', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAReports','command',
'Generate MDA report for current MDA directory',
label='Generate All MDA Report...',
command=self.mdaAllRpt)
self.menuBar.addmenuitem('MDAReports', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAReports','command',
'Generate MDA 2D report in IGOR format',
label='MDA to IGOR Report...',
command=self.mdaIGORRpt)
self.menuBar.addmenuitem('MDAReports', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAReports','command',
'Show ASCII Report Files',
label='View ASCII Report...',
command=self.showAscii)
self.menuBar.addmenuitem('MDAReports', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAReports','command',
'Clear All Files in ASCII directory',
label='Remove All Reports...',
command=self.removeAscii)
self.menuBar.addmenu('AsciiView','Various AsciiView features')
self.menuBar.addmenuitem('AsciiView', 'command', '',
label='--------------')
self.menuBar.addmenuitem('AsciiView','command',
'Enter the zero based X column # in ASCII file',
label='Multi-line Plotter...',
command=self.XcolDialog)
self.menuBar.addmenuitem('AsciiView','command',
'Pick line of DI legend name from the ascii file',
label='Extract & Modify Legend...',
command=self.DIlinedialog)
self.menuBar.addmenuitem('AsciiView', 'command', '',
label='--------------')
self.menuBar.addmenuitem('AsciiView', 'command',
'Pass ascii text data to image plot ...',
label='TV Image ...',
command=self.imageAscii)
self.menuBar.addmenu('Ascii2Image','Plot2D Ascii Image features')
self.menuBar.addmenuitem('Ascii2Image', 'command',
'No X,Y vector defined in ascii file',
label='Plot2d...',
command=self.plot2ddialog)
self.menuBar.addmenuitem('Ascii2Image', 'command',
'User set X,Y ranges dialog',
label='X,Y Range for image...',
command=self.XYrangeDialog)
self.menuBar.addmenuitem('Ascii2Image', 'command', '',
label='--------------')
self.menuBar.addmenuitem('Ascii2Image', 'command',
'Extract the X,Y line vectors from mdaAscii generated file',
label='X,Y Line vector from mdaAscii file...',
command=self.XYrowcolDialog)
self.menuBar.addmenuitem('Ascii2Image', 'command', '',
label='--------------')
self.menuBar.addmenuitem('Ascii2Image', 'command',
'Extract X,Y,Data from scanSee/catcher/yviewer generated file',
label='X column, Y line, DATA column from ascii file...',
command=self.XYVDialog)
self.menuBar.addmenu('ScanTools','Various scan programs')
self.menuBar.addmenuitem('ScanTools','command',
'Run plot.py python program',
label='Python plot.py ...',
command=self.runPlot)
self.menuBar.addmenuitem('ScanTools', 'command', '',
label='--------------')
self.menuBar.addmenuitem('ScanTools','command',
'Run idlvm sscan (scanSee) program',
label='idlvm sscan ...',
command=self.runSscan)
self.menuBar.addmenuitem('ScanTools','command',
'Run idlvm catcher (catcher) program',
label='idlvm catcher ...',
command=self.runCatcher)
self.menuBar.addmenuitem('ScanTools','command',
'Run idlvm mca (MCA) program',
label='idlvm mca ...',
command=self.runMCA)
self.menuBar.addmenu('Tools','Various system tools')
self.menuBar.addmenuitem('Tools','command',
'Run start_epics program',
label='start_epics ...',
command=self.runMedm)
self.menuBar.addmenuitem('Tools', 'command', '',
label='--------------')
self.menuBar.addmenuitem('Tools', 'command',
'Enter any valid command syntax ...',
label='Command Dialog...',
command=self.commandDialog)
def runPlot(self):
os.system('plot.py & ')
def runSscan(self):
os.system('idlvm sscan & ')
def runCatcher(self):
os.system('idlvm catcher & ')
def runMCA(self):
os.system('idlvm mca & ')
def runMedm(self):
h = os.getenv('HOME')
os.system(h +'/start_epics & ')
def commandDialog(self):
cmd = commandSyntax(self.interior())
def printerDialog(self):
setupPrinter(self.interior())
def removeAscii(self):
from Dialog import *
# dir = os.getcwd() +os.sep+'ASCII'+os.sep+'*.txt'
dir = self.txtpath+os.sep+'*.txt'
dir = 'rm -fr '+dir
pa = {'title': 'Remove ASCII files',
'text': dir + '\n\n'
'All ascii text files will be removed\n'
'from the sub-directory ASCII.\n'
'Is it OK to remove all files ?\n ',
'bitmap': DIALOG_ICON,
'default': 1,
'strings': ('OK','Cancel')}
dialog = Dialog(self.interior(),pa)
ans = dialog.num
if ans == 0:
print dir
os.system(dir)
def showAscii(self):
fname = tkFileDialog.askopenfilename(initialdir=self.txtpath,initialfile="*txt*")
if fname == (): return
xdisplayfile(fname)
def mdaIGORRpt(self):
if Scan['open']:
d = self.MDA
if d[0]['rank'] < 2:
return
fname = self.mdafile
ofname = mdaAscii_IGOR(d)
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=ofname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
fo = open(ofname,'r')
st_text = fo.read()
fo.close()
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=5, pady=5)
self.textWid = st
self.textfile = ofname
SH['ix'] = -1
writeSH(SH)
(self.txtpath,fn) = os.path.split(ofname)
else:
self.messageMDA()
def mdaAllRpt(self):
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text='MDA file from: '+self.mdapath,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.pack()
st.settext('Reports saved in: '+os.getcwd()+os.sep+'ASCII')
self.textWid=st
mdaAscii_all(self.mdapath)
def mda2D1DRpt(self):
# d = readMDA.readMDA(fname, 1, 0, 0)
if Scan['open']:
d = self.MDA
if d[0]['rank'] < 2: return
if d[2].nd == 0: return
fname = self.mdafile
ofname = mdaAscii_2D1D(d)
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=ofname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
fo = open(ofname,'r')
st_text = fo.read()
fo.close()
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=5, pady=5)
self.textWid = st
self.textfile = ofname
Scan['txtfile'] = ofname
SH['ix'] = 0
(self.txtpath,fn) = os.path.split(ofname)
def mda2DRpt(self):
# d = readMDA.readMDA(fname, 1, 0, 0)
if Scan['open']:
d = self.MDA
fname = self.mdafile
if d[1].nd > 0 :
ofname = mdaAscii_1D(d)
if d[0]['rank'] < 2: return
if d[2].nd == 0 : return
ofname = mdaAscii_2D(d)
py = d[1].p[0].data
px = d[2].p[0].data
px = px[0]
Scan['X'] = px
Scan['Y'] = py
Scan['txtfile'] = ofname
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=ofname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
fo = open(ofname,'r')
st_text = fo.read()
fo.close()
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=5, pady=5)
self.textWid = st
self.textfile = ofname
SH['ix'] = -1
writeSH(SH)
(self.txtpath,fn) = os.path.split(ofname)
else:
self.messageMDA()
def mda1DRptPlot(self):
self.mda1DRpt()
self.plotAscii()
def mdaReport(self):
d = self.MDA
if d[0]['rank'] == 1:
self.mda1DRpt()
if d[0]['rank'] >= 2:
self.mda2DRpt()
def mda1DRpt(self):
# d = readMDA.readMDA(fname, 1, 0, 0)
if Scan['open']:
d = self.MDA
fname = self.mdafile
ofname = mdaAscii_1D(d)
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=ofname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
fo = open(ofname,'r')
st_text = fo.read()
fo.close()
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=5, pady=5)
self.textWid = st
self.textfile = ofname
Scan['txtfile'] = ofname
SH['ix'] = 0
(self.txtpath,fn) = os.path.split(ofname)
else:
self.messageMDA()
def colorbar(self):
W = 256
clrbar =[]
for j in range(10):
clrbar.append(range(W))
clrbar = array(clrbar)
imagebar = PNGImage(self.canvas,clrbar,(2,2))
imagebar.pack(side='top')
self.imagebar = imagebar
def executeCT(self):
sels = self.textWid.getcurselection()
sels = string.split(sels[0])
CT_id = string.atoi(sels[0])
ps = str(CT[CT_id])
fo = open('pal.dat','wb')
fo.write(ps)
fo.close()
self.imagebar.destroy()
self.colorbar()
def setCTdialog(self):
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
CT = readCT()
CT_id=39
frame = self.interior()
self.canvas = Canvas(frame,width=300,height=50)
self.canvas.pack()
self.colorbar()
dname=('0 B-W LINEAR','1 BLUE/WHITE','2 GRN-RED-BLU-WHT',
'3 RED TEMPERATURE','4 BLUE/GREEN/RED/YELLOW','5 STD GAMMA-II',
'6 PRISM','7 RED-PURPLE','8 GREEN/WHITE LINEAR',
'9 GRN/WHT EXPONENTIAL','10 GREEN-PINK','11 BLUE-RED',
'12 16-LEVEL','13 RAINBOW','14 STEPS',
'15 STERN SPECIAL','16 Haze','17 Blue-Pastel-Red',
'18 Pastels','19 Hue Sat Lightness1','20 Hue Sat Lightness2',
'21 Hue Sat Value 1','22 Hue Sat Value 2','23 Purple-Red + Stripes',
'24 Beach','25 Mac Style','26 Eos A',
'27 Eos B','28 Hardcandy','29 Nature',
'30 Ocean','31 Peppermint','32 Plasma',
'33 Blue-Red','34 Rainbow',
'35 Blue Waves','36 Volcano','37 Waves',
'38 Rainbow18','39 Rainbow + white','40 Rainbow + black')
box = Pmw.ScrolledListBox(frame,
labelpos=N,label_text='Color Table #',
items=dname,
listbox_height=5,vscrollmode='static',
selectioncommand= self.executeCT,
dblclickcommand= self.executeCT,
usehullsize=1, hull_width=400, hull_height=200)
# box.pack(fill=BOTH,expand=1,padx=10,pady=10)
box.pack()
self.textWid = box
def selectionCommand(self):
box = self.textWid
sels = box.getcurselection()
sels = string.split(sels[0])
sel = string.atoi(sels[0])
Scan['2d'] = sel
d = self.MDA
pick2d(d,sel,updown=Scan['updown'])
def get2Ddetector(self):
if self.mdafile == '':
self.messageMDA()
return
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
root = self.interior()
d = self.MDA
nd = d[2].nd
dname =[]
for i in range(nd):
lst = str(i) + ' '+d[2].d[i].fieldName +' ' + d[2].d[i].name +' '+ d[2].d[i].desc +' '+d[2].d[i].unit
dname.append(lst)
box = Pmw.ScrolledListBox(root,
labelpos=N,label_text='2D Image Seq #',
items=(dname[0:nd]),
listbox_height=5,vscrollmode='static',
selectioncommand= self.selectionCommand,
dblclickcommand= self.selectionCommand,
usehullsize=1, hull_width=500, hull_height=200)
# box.pack(fill=BOTH,expand=1,padx=10,pady=10)
box.pack()
self.textWid = box
def getpanimage(self):
file = self.mdafile
if file != '':
d = self.MDA
pal = readPalette()
if d[0]['rank'] > 1:
det2D(d[2].d[0:d[2].nd],scale=(1,1),columns=5,file=file,pal=pal)
else:
self.messageMDA()
def headerMDA(self,d,J,st_text):
try:
if d[J].nd > 0:
st_text = st_text+d[J].scan_name+'\n'
st_test = st_text+'NPTS: '+str(d[J].npts)+'\n'
st_test = st_text+'CURR_PT: '+str(d[J].curr_pt)+'\n'
st_text = st_text + '**'+str(J)+'D detectors**\n'
for i in range(d[J].nd):
st_text=st_text+d[J].d[i].fieldName+' : '+d[J].d[i].name+', '+d[J].d[i].desc+', '+d[J].d[i].unit+'\n'
except IndexError:
pass
return st_text
def openMDA(self):
fname = askopenfilename( initialdir=self.mdapath,
filetypes=[("MDA File", '.mda'),
("All Files","*")])
if fname =='':
return
self.mdafile = fname
(self.mdapath, fn) = os.path.split(fname)
d = readMDA(fname)
self.MDA = d
Scan['data'] = d
Scan['open'] = 1
st_text = 'Please use ViewMDA menu to access MDA 1D/2D data array\n\n'
try:
if d[1].nd > 0:
st_text = self.headerMDA(d,1,st_text)
if d[1].nd > 0:
V=[]
for i in range(85):
V.append('')
for i in range(d[1].nd):
V[i] = d[1].d[i].fieldName
file='pvs'
fd = open(file,'w')
fd.write(str(V))
fd.close()
except IndexError:
pass
try:
if d[2].nd > 0:
st_text = self.headerMDA(d,2,st_text)
except IndexError:
pass
try:
if d[3].nd > 0:
st_text = self.headerMDA(d,3,st_text)
except IndexError:
pass
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=fname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=1, pady=1)
self.textWid = st
def openHelpText(self):
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
fname = os.environ['PYTHONSTARTUP']+os.sep+'pviewer_help.txt'
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=fname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.importfile(fname)
st.pack(fill=BOTH, expand=1, padx=1, pady=1)
self.textWid = st
def openAscii(self):
fname = askopenfilename(initialdir=self.txtpath,
filetypes=[("ASCII Data", '.txt'),
("Image Files","*im*"),
("Data Files",".dat"),
("All Files","*")])
if fname == '':
return
(self.txtpath,fn) = os.path.split(fname)
Scan['txtfile'] = fname
self.textfile = fname
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=fname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.importfile(fname)
st.pack(fill=BOTH, expand=1, padx=1, pady=1)
self.textWid = st
def imageAscii(self):
if self.textfile != '':
file = self.textfile
data = readArray(file)
data = rowreverse(data)
TV(data)
else:
self.messageAscii()
def plot2ddialog(self):
if self.textfile != '':
file = self.textfile
data = readArray(file)
data = rowreverse(data)
nr = len(data)
nc = len(data[0])
x = range(nc)
y = range(nr)
data = array(data)
if Scan['updown']:
plot2dUpdown(data,x,y,title=file)
else:
plot2d(data,x,y,title=file)
else:
self.messageAscii()
def plotAscii(self):
if self.textfile == '':
self.messageAscii()
return
try:
os.system('plotAscii.py '+self.textfile+' &')
except AttributeError:
pass
def XYrowcolDialog(self):
file = Scan['txtfile']
if file == '':
self.messageAscii()
return
ix = GetXYdialog(self.interior())
def XYVDialog(self):
file = Scan['txtfile']
if file == '':
self.messageAscii()
return
ix = GetXYVdialog(self.interior())
def XYrangeDialog(self):
file = Scan['txtfile']
if file == '':
self.messageAscii()
return
ix = defineXYdialog(self.interior())
def XcolDialog(self):
if self.textfile == '':
self.messageAscii()
else:
Scan['txtfile'] = self.textfile
ix=GetXdialog(self.interior())
def legenddialog(self):
# dialog=GetLegends(self.interior())
GetLegends(self.interior())
def DIlinedialog(self):
file = Scan['txtfile']
if file == '': return
dialog=pickDIdialog(self.interior())
def updownImage(self):
Scan['updown'] = self.toggleUpdownVar.get()
def pick2Ddialog(self):
if Scan['open']:
dialog=pick2Ddetector(self.interior())
def createInterface(self):
AppShell.AppShell.createInterface(self)
self.addMoremenuBar()
# self.createButtons()
self.textWid = None
self.mdafile = ''
self.textfile = ''
self.startup()
if __name__ == '__main__':
SH = {'ix': 0, 'printer': '', 'font': 'Verdana 10 bold', }
if os.path.isfile('SH'):
SH = readSH()
else:
writeSH(SH)
Scan = { 'open': 0,
'2d': 0,
'updown': 1,
'1d': 0,
'nc': 0,
'CT': 39,
'rowcol': [3,2,4,2],
'txtfile': '',
'pvs1': None,
'pvs2': None,
'pvs3': None,
'X': None,
'Y': None,
'im': None,
'data': None }
CT = readCT()
pt = pviewer()
pt.run()
| bsd-2-clause | -3,291,895,447,585,921,000 | 27.987974 | 146 | 0.633361 | false | 2.725818 | false | false | false |
c0cky/mediathread | mediathread/projects/admin.py | 1 | 1386 | from django.contrib import admin
from django.contrib.auth.models import User
from mediathread.projects.models import Project
class ProjectAdmin(admin.ModelAdmin):
search_fields = ("title",
"participants__last_name", "author__username",
"participants__last_name")
list_display = ("title", "course", "author", "modified",
"date_submitted", "id", "project_type",
"response_view_policy")
filter_horizontal = ('participants',)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "author":
kwargs["queryset"] = User.objects.all().order_by('username')
return super(ProjectAdmin, self).formfield_for_foreignkey(db_field,
request,
**kwargs)
def formfield_for_manytomany(self, db_field, request, **kwargs):
if db_field.name == "participants":
kwargs["queryset"] = User.objects.all().order_by('username')
return super(ProjectAdmin, self).formfield_for_manytomany(db_field,
request,
**kwargs)
admin.site.register(Project, ProjectAdmin)
| gpl-2.0 | 7,909,254,326,937,188,000 | 43.709677 | 75 | 0.519481 | false | 4.95 | false | false | false |
lorensen/VTKExamples | src/Python/Deprecated/GeometricObjects/ParametricObjectsDemo.py | 1 | 5485 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
def main():
colors = vtk.vtkNamedColors()
colors.SetColor("BkgColor", [26, 51, 102, 255])
parametricObjects = list()
parametricObjects.append(vtk.vtkParametricBoy())
parametricObjects.append(vtk.vtkParametricConicSpiral())
parametricObjects.append(vtk.vtkParametricCrossCap())
parametricObjects.append(vtk.vtkParametricDini())
parametricObjects.append(vtk.vtkParametricEllipsoid())
parametricObjects[-1].SetXRadius(0.5)
parametricObjects[-1].SetYRadius(2.0)
parametricObjects.append(vtk.vtkParametricEnneper())
parametricObjects.append(vtk.vtkParametricFigure8Klein())
parametricObjects.append(vtk.vtkParametricKlein())
parametricObjects.append(vtk.vtkParametricMobius())
parametricObjects[-1].SetRadius(2)
parametricObjects[-1].SetMinimumV(-0.5)
parametricObjects[-1].SetMaximumV(0.5)
parametricObjects.append(vtk.vtkParametricRandomHills())
parametricObjects[-1].AllowRandomGenerationOff()
parametricObjects.append(vtk.vtkParametricRoman())
parametricObjects.append(vtk.vtkParametricSuperEllipsoid())
parametricObjects[-1].SetN1(0.5)
parametricObjects[-1].SetN2(0.1)
parametricObjects.append(vtk.vtkParametricSuperToroid())
parametricObjects[-1].SetN1(0.2)
parametricObjects[-1].SetN2(3.0)
parametricObjects.append(vtk.vtkParametricTorus())
parametricObjects.append(vtk.vtkParametricSpline())
# Add some points to the parametric spline.
inputPoints = vtk.vtkPoints()
rng = vtk.vtkMinimalStandardRandomSequence()
rng.SetSeed(8775070)
for i in range(0, 10):
rng.Next()
x = rng.GetRangeValue(0.0, 1.0)
rng.Next()
y = rng.GetRangeValue(0.0, 1.0)
rng.Next()
z = rng.GetRangeValue(0.0, 1.0)
inputPoints.InsertNextPoint(x, y, z)
parametricObjects[-1].SetPoints(inputPoints)
parametricFunctionSources = list()
renderers = list()
mappers = list()
actors = list()
textmappers = list()
textactors = list()
# Create one text property for all
textProperty = vtk.vtkTextProperty()
textProperty.SetFontSize(12)
textProperty.SetJustificationToCentered()
backProperty = vtk.vtkProperty()
backProperty.SetColor(colors.GetColor3d("Tomato"))
# Create a parametric function source, renderer, mapper, and actor
# for each object
for i in range(0, len(parametricObjects)):
parametricFunctionSources.append(vtk.vtkParametricFunctionSource())
parametricFunctionSources[i].SetParametricFunction(parametricObjects[i])
parametricFunctionSources[i].SetUResolution(51)
parametricFunctionSources[i].SetVResolution(51)
parametricFunctionSources[i].SetWResolution(51)
parametricFunctionSources[i].Update()
mappers.append(vtk.vtkPolyDataMapper())
mappers[i].SetInputConnection(parametricFunctionSources[i].GetOutputPort())
actors.append(vtk.vtkActor())
actors[i].SetMapper(mappers[i])
actors[i].GetProperty().SetColor(colors.GetColor3d("Banana"))
actors[i].GetProperty().SetSpecular(.5)
actors[i].GetProperty().SetSpecularPower(20)
actors[i].SetBackfaceProperty(backProperty)
textmappers.append(vtk.vtkTextMapper())
textmappers[i].SetInput(parametricObjects[i].GetClassName())
textmappers[i].SetTextProperty(textProperty)
textactors.append(vtk.vtkActor2D())
textactors[i].SetMapper(textmappers[i])
textactors[i].SetPosition(100, 16)
renderers.append(vtk.vtkRenderer())
renderers[i].AddActor(actors[i])
renderers[i].AddActor(textactors[i])
renderers[i].SetBackground(colors.GetColor3d("BkgColor"))
# Setup the viewports
xGridDimensions = 4
yGridDimensions = 4
rendererSize = 200
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetWindowName("Parametric Objects Demonstration")
renderWindow.SetSize(rendererSize * xGridDimensions, rendererSize * yGridDimensions)
for row in range(0, yGridDimensions):
for col in range(0, xGridDimensions):
index = row * xGridDimensions + col
# (xmin, ymin, xmax, ymax)
viewport = [float(col) / xGridDimensions,
float(yGridDimensions - (row + 1)) / yGridDimensions,
float(col + 1) / xGridDimensions,
float(yGridDimensions - row) / yGridDimensions]
if index > (len(actors) - 1):
# Add a renderer even if there is no actor.
# This makes the render window background all the same color.
ren = vtk.vtkRenderer()
ren.SetBackground(colors.GetColor3d("BkgColor"))
ren.SetViewport(viewport)
renderWindow.AddRenderer(ren)
continue
renderers[index].SetViewport(viewport)
renderers[index].ResetCamera()
renderers[index].GetActiveCamera().Azimuth(30)
renderers[index].GetActiveCamera().Elevation(-30)
renderers[index].GetActiveCamera().Zoom(0.9)
renderers[index].ResetCameraClippingRange()
renderWindow.AddRenderer(renderers[index])
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(renderWindow)
renderWindow.Render()
interactor.Start()
if __name__ == '__main__':
main()
| apache-2.0 | 3,249,549,297,955,955,000 | 36.827586 | 88 | 0.678213 | false | 3.728756 | false | false | false |
jamielennox/python-keystoneclient | keystoneclient/tests/test_cms.py | 1 | 6122 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import subprocess
import mock
import testresources
from testtools import matchers
from keystoneclient.common import cms
from keystoneclient import exceptions
from keystoneclient.tests import client_fixtures
from keystoneclient.tests import utils
class CMSTest(utils.TestCase, testresources.ResourcedTestCase):
"""Unit tests for the keystoneclient.common.cms module."""
resources = [('examples', client_fixtures.EXAMPLES_RESOURCE)]
def test_cms_verify(self):
self.assertRaises(exceptions.CertificateConfigError,
cms.cms_verify,
'data',
'no_exist_cert_file',
'no_exist_ca_file')
def test_token_tocms_to_token(self):
with open(os.path.join(client_fixtures.CMSDIR,
'auth_token_scoped.pem')) as f:
AUTH_TOKEN_SCOPED_CMS = f.read()
self.assertEqual(cms.token_to_cms(self.examples.SIGNED_TOKEN_SCOPED),
AUTH_TOKEN_SCOPED_CMS)
tok = cms.cms_to_token(cms.token_to_cms(
self.examples.SIGNED_TOKEN_SCOPED))
self.assertEqual(tok, self.examples.SIGNED_TOKEN_SCOPED)
def test_asn1_token(self):
self.assertTrue(cms.is_asn1_token(self.examples.SIGNED_TOKEN_SCOPED))
self.assertFalse(cms.is_asn1_token('FOOBAR'))
def test_cms_sign_token_no_files(self):
self.assertRaises(subprocess.CalledProcessError,
cms.cms_sign_token,
self.examples.TOKEN_SCOPED_DATA,
'/no/such/file', '/no/such/key')
def test_cms_sign_token_no_files_pkiz(self):
self.assertRaises(subprocess.CalledProcessError,
cms.pkiz_sign,
self.examples.TOKEN_SCOPED_DATA,
'/no/such/file', '/no/such/key')
def test_cms_sign_token_success(self):
self.assertTrue(
cms.pkiz_sign(self.examples.TOKEN_SCOPED_DATA,
self.examples.SIGNING_CERT_FILE,
self.examples.SIGNING_KEY_FILE))
def test_cms_verify_token_no_files(self):
self.assertRaises(exceptions.CertificateConfigError,
cms.cms_verify,
self.examples.SIGNED_TOKEN_SCOPED,
'/no/such/file', '/no/such/key')
def test_cms_verify_token_no_oserror(self):
def raise_OSError(*args):
e = OSError()
e.errno = errno.EPIPE
raise e
with mock.patch('subprocess.Popen.communicate', new=raise_OSError):
try:
cms.cms_verify("x", '/no/such/file', '/no/such/key')
except exceptions.CertificateConfigError as e:
self.assertIn('/no/such/file', e.output)
self.assertIn('Hit OSError ', e.output)
else:
self.fail('Expected exceptions.CertificateConfigError')
def test_cms_verify_token_scoped(self):
cms_content = cms.token_to_cms(self.examples.SIGNED_TOKEN_SCOPED)
self.assertTrue(cms.cms_verify(cms_content,
self.examples.SIGNING_CERT_FILE,
self.examples.SIGNING_CA_FILE))
def test_cms_verify_token_scoped_expired(self):
cms_content = cms.token_to_cms(
self.examples.SIGNED_TOKEN_SCOPED_EXPIRED)
self.assertTrue(cms.cms_verify(cms_content,
self.examples.SIGNING_CERT_FILE,
self.examples.SIGNING_CA_FILE))
def test_cms_verify_token_unscoped(self):
cms_content = cms.token_to_cms(self.examples.SIGNED_TOKEN_UNSCOPED)
self.assertTrue(cms.cms_verify(cms_content,
self.examples.SIGNING_CERT_FILE,
self.examples.SIGNING_CA_FILE))
def test_cms_verify_token_v3_scoped(self):
cms_content = cms.token_to_cms(self.examples.SIGNED_v3_TOKEN_SCOPED)
self.assertTrue(cms.cms_verify(cms_content,
self.examples.SIGNING_CERT_FILE,
self.examples.SIGNING_CA_FILE))
def test_cms_hash_token_no_token_id(self):
token_id = None
self.assertThat(cms.cms_hash_token(token_id), matchers.Is(None))
def test_cms_hash_token_not_pki(self):
"""If the token_id is not a PKI token then it returns the token_id."""
token = 'something'
self.assertFalse(cms.is_asn1_token(token))
self.assertThat(cms.cms_hash_token(token), matchers.Is(token))
def test_cms_hash_token_default_md5(self):
"""The default hash method is md5."""
token = self.examples.SIGNED_TOKEN_SCOPED
token_id_default = cms.cms_hash_token(token)
token_id_md5 = cms.cms_hash_token(token, mode='md5')
self.assertThat(token_id_default, matchers.Equals(token_id_md5))
# md5 hash is 32 chars.
self.assertThat(token_id_default, matchers.HasLength(32))
def test_cms_hash_token_sha256(self):
"""Can also hash with sha256."""
token = self.examples.SIGNED_TOKEN_SCOPED
token_id = cms.cms_hash_token(token, mode='sha256')
# sha256 hash is 64 chars.
self.assertThat(token_id, matchers.HasLength(64))
def load_tests(loader, tests, pattern):
return testresources.OptimisingTestSuite(tests)
| apache-2.0 | -8,033,659,842,411,192,000 | 40.364865 | 78 | 0.601764 | false | 3.929397 | true | false | false |
rmed/textventures | src/textventures/instances/key_navigation.py | 1 | 4144 | # -*- coding: utf-8 -*-
# This file is part of TextVentures - https://github.com/RMed/textventures
#
# Copyright (C) 2013 Rafael Medina García <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import menu, sys
class Listener:
"""Gets user input for navigation."""
def __init__(self):
# Check for Windows platform
if sys.platform.startswith('win'):
import msvcrt
# Check for UNIX platforms
else:
import tty
def __call__(self):
# Windows
if sys.platform.startswith('win'):
import msvcrt
# Save character
char = msvcrt.getch()
# UNIX
else:
import tty, termios
# Read character
fd = sys.stdin.fileno()
attr = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
char = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, attr)
# Return character
        return char
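# Illustrative usage (an assumption added for clarity, not part of the original
# module): a menu loop would typically combine the two classes like
#
#     listener = Listener()
#     key = listener()            # blocks until a single key is pressed
#     Action(key, 'main')()       # dispatch the key against the main menu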
class Action:
"""Check the input character and act accordingly."""
def __init__(self, input_char, action_type):
"""Arguments:
input_char -- pressed character
action_type -- type of the action (menu, load, etc)
"""
self.char = input_char.lower()
self.action = action_type
def __call__(self):
# Check the action type
if self.action == 'main':
# Main menu
if self.char == 'n':
# New game menu
menu.newgame_menu()
elif self.char == 'l':
# Load game menu
menu.load_menu()
elif self.char == 'o':
# Options menu
menu.options_menu()
elif self.char == 'h':
# Help menu
menu.help_menu()
elif self.char == 'a':
# About menu
menu.about_menu()
elif self.char == 'e':
# Exit program
sys.exit()
elif self.action == 'load':
# Load menu
if self.char == 'b':
# Back to main menu
menu.main_menu()
elif self.char == 'c':
# Choose game
return self.char
elif self.action == 'options':
# Load menu
if self.char == 'b':
# Back to main menu
menu.main_menu()
elif self.char == 'c':
# Choose language
return self.char
elif self.action == 'new':
# New game menu
if self.char == 'b':
# Back to main menu
menu.main_menu()
elif self.char == 'c':
# Choose game
return self.char
elif self.action == 'help':
# Help menu
if self.char == 'b':
# Back to main menu
menu.main_menu()
elif self.action == 'about':
# About menu
if self.char == 'l':
menu.show_license()
elif self.char == 'b':
# Back to main menu
menu.main_menu()
elif self.action == 'license':
# License
if self.char == 'b':
# Back to About menu
menu.about_menu()
| gpl-2.0 | 5,757,127,063,372,303,000 | 30.150376 | 74 | 0.502052 | false | 4.542763 | false | false | false |
banansson/cask | cask.py | 1 | 3281 | #!/usr/bin/python
import sys
import argparse
from os import path
from src.bag import Bag
from src.package import Package
from src.task import Task
from src.message import Message
from src.application import Application
from src.application_info import ApplicationInfo
from src.bootstrap import Bootstrap
from src import utils
def run(argv):
default_packs_dir = "~/.config/cask/packs"
default_target_dir = "~"
parser = argparse.ArgumentParser()
actions = parser.add_mutually_exclusive_group()
actions.add_argument('--version', action='store_true',
help='Display version')
actions.add_argument('--bootstrap', action='store_true',
help='run bootstrap test')
parser.add_argument('command', nargs='?', help='Command to run: list, query, install')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='be verbose')
parser.add_argument('-d', '--dryrun', action='store_true',
help='run in test mode, nothing is installed')
parser.add_argument('-s', '--source', action='store',
default=default_packs_dir,
help='override directory in which to look for packages')
parser.add_argument('-t', '--target', action='store',
default=default_target_dir,
help='override directory in which to install packages')
parser.add_argument('package', nargs='?', help='Name of package')
args = parser.parse_args()
verbose = args.verbose
message = Message(sys.stdout, verbose > 0)
if args.bootstrap:
bootstrap = Bootstrap()
if args.verbose:
bootstrap.verbose(message)
else:
verifications = bootstrap.verify_all()
if not verifications[0]:
message.info('Boostrap verification failed! Use verbose flag for more detailed output')
message.major('Errors:')
for error in verifications[1]:
message.minor(error)
else:
message.info('Boostrap verification succeeded')
return 0
appinfo = ApplicationInfo()
if args.version:
message.info(appinfo.name())
return 0
if not(args.command or args.package):
message.info("No package specified, use -h or --help for help. Listing of")
message.info("all packages can be done using the 'list' argument.")
return 0
(valid, source) = utils.try_lookup_dir(args.source)
if not valid:
message.error("No such directory: %s" % source)
return 0
message.plain("Looking for packages in: %s" % source)
target = utils.lookup_dir(args.target)
bag = Bag(path.abspath(source))
app = Application(bag, message, args)
commands = {}
commands['list'] = lambda bag, message, args: app.list(verbose)
commands['query'] = lambda bag, message, args: app.query(args.package, target)
commands['install'] = lambda bag, message, args: app.install(args.package, target, args.dryrun)
    if not args.command:
message.info("No action specified, use -h or --help for help.")
return 0
cmd = args.command
if cmd not in commands:
message.info('No such command: {:s}'.format(cmd))
return 0
commands[cmd](bag, message, args)
return 0
if __name__ == '__main__':
code = run(sys.argv)
exit(code)
| mit | 1,634,004,693,254,727,000 | 29.663551 | 97 | 0.650716 | false | 3.919952 | false | false | false |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Tools/scripts/pep384_macrocheck.py | 4 | 4720 | """
pep384_macrocheck.py
This program tries to locate errors in the relevant Python header
files where macros access type fields while being reachable from
the limited API.
The idea is to search for macros that contain the string "->tp_".
When such a macro's name does not begin with an underscore,
then we have found a dormant error.
Christian Tismer
2018-06-02
"""
import sys
import os
import re
DEBUG = False
def dprint(*args, **kw):
if DEBUG:
print(*args, **kw)
def parse_headerfiles(startpath):
"""
    Scan all header files which are reachable from Python.h
"""
search = "Python.h"
name = os.path.join(startpath, search)
if not os.path.exists(name):
raise ValueError("file {} was not found in {}\n"
"Please give the path to Python's include directory."
.format(search, startpath))
errors = 0
with open(name) as python_h:
while True:
line = python_h.readline()
if not line:
break
found = re.match(r'^\s*#\s*include\s*"(\w+\.h)"', line)
if not found:
continue
include = found.group(1)
dprint("Scanning", include)
name = os.path.join(startpath, include)
if not os.path.exists(name):
name = os.path.join(startpath, "../PC", include)
errors += parse_file(name)
return errors
def ifdef_level_gen():
"""
Scan lines for #ifdef and track the level.
"""
level = 0
ifdef_pattern = r"^\s*#\s*if" # covers ifdef and ifndef as well
endif_pattern = r"^\s*#\s*endif"
while True:
line = yield level
if re.match(ifdef_pattern, line):
level += 1
elif re.match(endif_pattern, line):
level -= 1
def limited_gen():
"""
Scan lines for Py_LIMITED_API yes(1) no(-1) or nothing (0)
"""
limited = [0] # nothing
unlimited_pattern = r"^\s*#\s*ifndef\s+Py_LIMITED_API"
limited_pattern = "|".join([
r"^\s*#\s*ifdef\s+Py_LIMITED_API",
r"^\s*#\s*(el)?if\s+!\s*defined\s*\(\s*Py_LIMITED_API\s*\)\s*\|\|",
r"^\s*#\s*(el)?if\s+defined\s*\(\s*Py_LIMITED_API"
])
else_pattern = r"^\s*#\s*else"
ifdef_level = ifdef_level_gen()
status = next(ifdef_level)
wait_for = -1
while True:
line = yield limited[-1]
new_status = ifdef_level.send(line)
dir = new_status - status
status = new_status
if dir == 1:
if re.match(unlimited_pattern, line):
limited.append(-1)
wait_for = status - 1
elif re.match(limited_pattern, line):
limited.append(1)
wait_for = status - 1
elif dir == -1:
# this must have been an endif
if status == wait_for:
limited.pop()
wait_for = -1
else:
# it could be that we have an elif
if re.match(limited_pattern, line):
limited.append(1)
wait_for = status - 1
elif re.match(else_pattern, line):
limited.append(-limited.pop()) # negate top
def parse_file(fname):
errors = 0
with open(fname) as f:
lines = f.readlines()
type_pattern = r"^.*?->\s*tp_"
define_pattern = r"^\s*#\s*define\s+(\w+)"
limited = limited_gen()
status = next(limited)
for nr, line in enumerate(lines):
status = limited.send(line)
line = line.rstrip()
dprint(fname, nr, status, line)
if status != -1:
if re.match(define_pattern, line):
name = re.match(define_pattern, line).group(1)
if not name.startswith("_"):
# found a candidate, check it!
macro = line + "\n"
idx = nr
while line.endswith("\\"):
idx += 1
line = lines[idx].rstrip()
macro += line + "\n"
if re.match(type_pattern, macro, re.DOTALL):
# this type field can reach the limited API
report(fname, nr + 1, macro)
errors += 1
return errors
def report(fname, nr, macro):
f = sys.stderr
print(fname + ":" + str(nr), file=f)
print(macro, file=f)
if __name__ == "__main__":
p = sys.argv[1] if sys.argv[1:] else "../../Include"
errors = parse_headerfiles(p)
if errors:
# somehow it makes sense to raise a TypeError :-)
raise TypeError("These {} locations contradict the limited API."
.format(errors))
| apache-2.0 | -1,668,539,338,901,291,500 | 30.891892 | 75 | 0.523517 | false | 3.725335 | false | false | false |
yongfuyang/vnpy | vn.trader/ctaAlgo/ctaBase.py | 1 | 5912 | # encoding: UTF-8
'''
This file contains the basic settings, classes and constants used by the CTA module.
'''
from __future__ import division
# Add the vn.trader root directory to the Python module search path
import sys
sys.path.append('..')
# Constant definitions
# Order-direction types used by the CTA engine (the Chinese string literals are
# the exact values expected by the trading gateway and must stay unchanged)
CTAORDER_BUY = u'买开'               # buy to open
CTAORDER_SELL = u'卖平'              # sell to close
CTAORDER_SELLTODAY = u'卖平今'       # sell to close today's position
CTAORDER_SELLYESTERDAY = u'卖平昨'   # sell to close yesterday's position
CTAORDER_SHORT = u'卖开'             # sell to open (short)
CTAORDER_COVER = u'买平'             # buy to close (cover)
CTAORDER_COVERTODAY = u'买今平'      # buy to close today's position
CTAORDER_COVERYESTERDAY = u'买平昨'  # buy to close yesterday's position
DIRECTION_LONG = u'多'               # long
DIRECTION_SHORT = u'空'              # short
# Local stop-order status values
STOPORDER_WAITING = u'等待中'      # waiting
STOPORDER_CANCELLED = u'已撤销'    # cancelled
STOPORDER_TRIGGERED = u'已触发'    # triggered
# Prefix for local stop-order IDs
STOPORDERPREFIX = 'CtaStopOrder.'
# Database names
SETTING_DB_NAME = 'VnTrader_Setting_Db'
POSITION_DB_NAME = 'VnTrader_Position_Db'
BARSIZE_DICT = {}
BARSIZE_DICT = {
0 : 'tick',
1 : '1 secs',
2 : '5 secs',
3 : '15 secs',
4 : '30 secs',
5 : '1 min',
6 : '2 mins',
7 : '3 min',
8 : '5 mins',
9 : '15 mins',
10 : '30 mins',
11 : '1 hour',
12 : '1 day'
}
# Cache tables in use
# Temporary DataFrame name used for each barSize
BARSIZE_DFNAME_DICT = {}
BARSIZE_DFNAME_DICT = {
0 : 'df_tick',
1 : 'df_S_Bar',
2 : 'df_S5_Bar',
3 : 'df_S15_Bar',
4 : 'df_S30_Bar',
5 : 'df_M1_Bar',
6 : 'df_M2_Bar',
7 : 'df_M3_Bar',
8 : 'df_M5_Bar',
9 : 'df_M15_Bar',
10 : 'df_M30_Bar',
11 : 'df_H_Bar',
12 : 'df_D_Bar'
}
# Mapping from BARSIZE to the local database names
# The database names must match those used in ctaBase
BARSIZE_DBNAME_DICT = {}
BARSIZE_DBNAME_DICT = {
0:'VnTrader_Tick_Db',
5:'VnTrader_1Min_Db',
8:'VnTrader_5Min_Db',
9: 'VnTrader_15Min_Db',
10: 'VnTrader_30Min_Db',
11: 'VnTrader_Hour_Db',
12: 'VnTrader_Daily_Db'
}
# Database names
SETTING_DB_NAME = 'VnTrader_Setting_Db'
TICK_DB_NAME = 'VnTrader_Tick_Db'
DAILY_DB_NAME = 'VnTrader_Daily_Db'
MINUTE_DB_NAME = 'VnTrader_1Min_Db' # 1-minute bar database name (original name: 'VnTrader_1Min_Db')
# Added by the author
HOUR_DB_NAME = 'VnTrader_Hour_Db'
MINUTE5_DB_NAME = 'VnTrader_5Min_Db'
MINUTE15_DB_NAME = 'VnTrader_15Min_Db'
MINUTE30_DB_NAME = 'VnTrader_30Min_Db'
# Engine type, used to distinguish the environment the strategy is running in
ENGINETYPE_BACKTESTING = 'backtesting' # backtesting
ENGINETYPE_TRADING = 'trading' # live trading
# Data class definitions used by the CTA engine
from vtConstant import EMPTY_UNICODE, EMPTY_STRING, EMPTY_FLOAT, EMPTY_INT
########################################################################
class StopOrder(object):
"""本地停止单"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.vtSymbol = EMPTY_STRING
self.orderType = EMPTY_UNICODE
self.direction = EMPTY_UNICODE
self.offset = EMPTY_UNICODE
self.price = EMPTY_FLOAT
self.volume = EMPTY_INT
        self.strategy = None             # strategy object that placed this stop order
        self.stopOrderID = EMPTY_STRING  # local ID of the stop order
        self.status = EMPTY_STRING       # status of the stop order
########################################################################
class CtaBarData(object):
"""K线数据"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
        self.vtSymbol = EMPTY_STRING        # vt system symbol
        self.symbol = EMPTY_STRING          # contract symbol
        self.exchange = EMPTY_STRING        # exchange
self.open = EMPTY_FLOAT # OHLC
self.high = EMPTY_FLOAT
self.low = EMPTY_FLOAT
self.close = EMPTY_FLOAT
        self.date = EMPTY_STRING            # date on which the bar starts
        self.time = EMPTY_STRING            # time
        self.datetime = None                # Python datetime object
        self.volume = EMPTY_INT             # volume
        self.openInterest = EMPTY_INT       # open interest
########################################################################
class CtaTickData(object):
"""Tick数据"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
        self.vtSymbol = EMPTY_STRING            # vt system symbol
        self.symbol = EMPTY_STRING              # contract symbol
        self.exchange = EMPTY_STRING            # exchange code
        # Trade data
        self.lastPrice = EMPTY_FLOAT            # last traded price
        self.volume = EMPTY_INT                 # last traded volume
        self.openInterest = EMPTY_INT           # open interest
        self.upperLimit = EMPTY_FLOAT           # limit-up price
        self.lowerLimit = EMPTY_FLOAT           # limit-down price
        # Tick timestamp
        self.date = EMPTY_STRING                # date
        self.time = EMPTY_STRING                # time
        self.datetime = None                    # Python datetime object
        # Five levels of market depth
self.bidPrice1 = EMPTY_FLOAT
self.bidPrice2 = EMPTY_FLOAT
self.bidPrice3 = EMPTY_FLOAT
self.bidPrice4 = EMPTY_FLOAT
self.bidPrice5 = EMPTY_FLOAT
self.askPrice1 = EMPTY_FLOAT
self.askPrice2 = EMPTY_FLOAT
self.askPrice3 = EMPTY_FLOAT
self.askPrice4 = EMPTY_FLOAT
self.askPrice5 = EMPTY_FLOAT
self.bidVolume1 = EMPTY_INT
self.bidVolume2 = EMPTY_INT
self.bidVolume3 = EMPTY_INT
self.bidVolume4 = EMPTY_INT
self.bidVolume5 = EMPTY_INT
self.askVolume1 = EMPTY_INT
self.askVolume2 = EMPTY_INT
self.askVolume3 = EMPTY_INT
self.askVolume4 = EMPTY_INT
self.askVolume5 = EMPTY_INT | mit | 8,423,452,578,316,974,000 | 25.412935 | 79 | 0.514883 | false | 2.730453 | false | false | false |
OpenToAllCTF/OTA-Challenge-Bot | server/consolethread.py | 1 | 1960 | import threading
from bottypes.invalid_console_command import InvalidConsoleCommand
from util.loghandler import log
class ConsoleThread(threading.Thread):
def __init__(self, botserver):
self.botserver = botserver
threading.Thread.__init__(self)
def update_config(self, option, value):
try:
self.botserver.set_config_option(option, value)
except InvalidConsoleCommand as e:
log.error(e)
def show_set_usage(self):
print("\nUsage: set <option> <value>")
print("")
print("Available options:")
if self.botserver.config:
for config_option in self.botserver.config:
print("{0:20} = {1}".format(config_option,
self.botserver.config[config_option]))
print("")
def quit(self):
"""Inform the application that it is quitting."""
log.info("Shutting down")
self.running = False
def run(self):
self.running = True
while self.running:
try:
parts = input("").split(" ")
cmd = parts[0].lower()
if cmd == "quit":
self.botserver.quit()
break
# Example command: Useless, but just an example, for what
# console handler could do
elif cmd == "createchannel":
if len(parts) < 2:
print("Usage: createchannel <channel>")
else:
self.botserver.slack_wrapper.create_channel(parts[1])
elif cmd == "set":
if len(parts) < 3:
self.show_set_usage()
else:
self.update_config(parts[1], parts[2])
except Exception:
log.exception("An error has occured while processing a console command")
| mit | -3,119,133,389,237,143,600 | 31.131148 | 88 | 0.512245 | false | 4.722892 | true | false | false |
Thomasvdw/ProgProject | Data/PVdata/add_sum_capacity_perdate.py | 1 | 4323 | # -*- coding: utf-8 -*-
"""
Created on Wed May 27 20:06:01 2015
@author: Thomas
"""
# Python standard library imports
import csv
import os
def main():
sizes = []
for file in os.listdir("reformatted/"):
print file
size_total = []
size_2000 = []
size_2001 = []
size_2002 = []
size_2003 = []
size_2004 = []
size_2005 = []
size_2006 = []
size_2007 = []
size_2008 = []
size_2009 = []
size_2010 = []
size_2011 = []
size_2012 = []
size_2013 = []
size_2014 = []
size_2015 = []
name = "reformatted/" + file
with open(name, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter = ",")
next(csvfile)
for row in reader:
date = str(row[4])
date = date[-4:]
try:
size = row[2]
size = float(size)
if size > 200 or len(str(size)) > 6:
size = 0
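                    # Cumulative cutoffs: a system whose install year is before N is
                    # appended to every size_N list with N at or after that year, so
                    # each size_YYYY holds the capacity installed before Jan 1 of YYYY.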
if date < "2015":
size_2015.append(size)
if date < "2014":
size_2014.append(size)
if date < "2013":
size_2013.append(size)
if date < "2012":
size_2012.append(size)
if date < "2011":
size_2011.append(size)
if date < "2010":
size_2010.append(size)
if date < "2009":
size_2009.append(size)
if date < "2008":
size_2008.append(size)
if date < "2007":
size_2007.append(size)
if date < "2006":
size_2006.append(size)
if date < "2005":
size_2005.append(size)
if date < "2004":
size_2004.append(size)
if date < "2003":
size_2003.append(size)
if date < "2002":
size_2002.append(size)
if date < "2001":
size_2001.append(size)
if date < "2000":
size_2000.append(size)
size_total.append(size)
except ValueError:
pass
size2015 = sum(size_2015)
size2014 = sum(size_2014)
size2013 = sum(size_2013)
size2012 = sum(size_2012)
size2011 = sum(size_2011)
size2010 = sum(size_2010)
size2009 = sum(size_2009)
size2008 = sum(size_2008)
size2007 = sum(size_2007)
size2006 = sum(size_2006)
size2005 = sum(size_2005)
size2004 = sum(size_2004)
size2003 = sum(size_2003)
size2002 = sum(size_2002)
size2001 = sum(size_2001)
size2000 = sum(size_2000)
sizetotal = sum(size_total)
all_sizes = [int(size2015), int(size2014), int(size2013), int(size2012),
int(size2011), int(size2010), int(size2009), int(size2008),
int(size2007), int(size2006), int(size2005), int(size2004),
int(size2003), int(size2002), int(size2001), int(size2000),
int(sizetotal)]
sizes.append(all_sizes)
dates = ['1/1/2015', '1/1/2014', '1/1/2013', '1/1/2012',
'1/1/2011', '1/1/2010', '1/1/2009', '1/1/2008',
'1/1/2007', '1/1/2006', '1/1/2005', '1/1/2004',
'1/1/2003', '1/1/2002', '1/1/2001', '1/1/2000', "total"]
for x, file in enumerate(os.listdir("reformatted/")):
name = "population_energy_growth/solar_size/" + "solar_size_" + file
with open(name, 'wb') as f:
writer = csv.writer(f)
writer.writerow(['Date', 'Size'])
for i in range(17):
writer.writerow([dates[i], sizes[x][i]])
return sizes, dates
if __name__ == '__main__':
sizes, dates = main()
| mit | -6,894,113,067,600,910,000 | 31.757576 | 81 | 0.417303 | false | 4.070621 | false | false | false |
argvk/lastfmloved-syncer | update_banshee.py | 1 | 1928 | import sqlite3
import sys
import requests
import xml.dom.minidom
from os import path
con = None
artists = {}
url_params = {}
total_pages = -1
page_no = 0
user_name = sys.argv[1]
banshee_db = path.expanduser("~/.config/banshee-1/banshee.db")
con = sqlite3.connect(banshee_db)
cur = con.cursor()
while True:
if total_pages == page_no:
break
url_params['page'] = page_no
page_no = page_no + 1
r = requests.get("http://ws.audioscrobbler.com/2.0/user/" + user_name + "/lovedtracks.xml",params = url_params)
request_result = xml.dom.minidom.parseString(r.content)
if total_pages == -1:
total_pages = int(request_result.getElementsByTagName("lovedtracks")[0].attributes["totalPages"].value)
for track_data in request_result.getElementsByTagName("track"):
track_raw = track_data.getElementsByTagName("name")[0].firstChild.nodeValue
artist_raw = track_data.getElementsByTagName("name")[1].firstChild.nodeValue
track = track_raw.lower().replace("'","").replace(".","")
artist = artist_raw.lower().replace("'","").replace(".","")
print track,
print '|',
print artist,
print '|',
if artist not in artists:
cur.execute('SELECT ca.ArtistId FROM CoreArtists ca WHERE ca.NameLowered = ? LIMIT 1',(artist,))
row = cur.fetchone()
if row == None:
print 'artist not found'
continue
artists[artist] = row[0]
artist_id = artists[artist]
print artist_id,
print '|',
try:
with con:
cur.execute('UPDATE CoreTracks SET Rating = 5 WHERE CoreTracks.TitleLowered = ? AND CoreTracks.ArtistId = ? ', (track,artist_id,))
except sqlite3.Error, e:
print "error %s:" % e.args[0]
sys.exit(1)
print 'updated' ,cur.rowcount
if con:
con.close()
| mit | -6,483,506,008,347,363,000 | 25.777778 | 146 | 0.598029 | false | 3.658444 | false | false | false |
googleads/googleads-python-lib | examples/adwords/adwords_appengine_demo/views/add_campaign_view.py | 1 | 2373 | #!/usr/bin/env python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles request to add a Campaign to a client account."""
import os
from handlers.api_handler import APIHandler
from handlers.ndb_handler import InitUser
import webapp2
from google.appengine.api import users
from google.appengine.ext.webapp import template
class AddCampaign(webapp2.RequestHandler):
"""View that either adds a Campaign or displays an error message."""
def post(self):
"""Handle post request."""
client_customer_id = self.request.get('clientCustomerId')
campaign_name = self.request.get('campaignName')
ad_channel_type = self.request.get('adChannelType')
budget = self.request.get('budget')
template_values = {
'back_url': '/showCampaigns?clientCustomerId=%s' % client_customer_id,
'back_msg': 'View Campaigns',
'logout_url': users.create_logout_url('/'),
'user_nickname': users.get_current_user().nickname()
}
try:
app_user = InitUser()
# Load Client instance.
handler = APIHandler(app_user.client_id,
app_user.client_secret,
app_user.refresh_token,
app_user.adwords_manager_cid,
app_user.developer_token)
# Create new campaign.
handler.AddCampaign(client_customer_id, campaign_name,
ad_channel_type, budget)
self.redirect('/showCampaigns?clientCustomerId=%s' % client_customer_id)
except Exception as e:
template_values['error'] = str(e)
# Use template to write output to the page.
path = os.path.join(os.path.dirname(__file__),
'../templates/base_template.html')
self.response.out.write(template.render(path, template_values))
| apache-2.0 | 375,887,224,737,519,740 | 35.507692 | 78 | 0.664981 | false | 3.981544 | false | false | false |
xfaxca/pymlkit | pymlkit/models/regressors.py | 1 | 4199 | """
Module for custom regression model classes.
"""
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin
"""
Rolling todo:
1. For AvgReg: Modify how parameters are used. Put them all into a dict. Also change X_train, y_train to just X,y
"""
class AveragingRegressor(BaseEstimator, RegressorMixin):
"""
    Summary: A meta-regressor that averages the predictions of its constituent regressors. Analogous to
        a majority-vote classifier, but for regression.
Attributes:
-------------
- regs: Base/Constituent regressors from which the average predictions are calculated
- reg_names: Names of the constituent regressors
- params: Optionally user-supplied initialization parameters for the
        - base_predictions: Predictions of the constituent regressors. This attribute is None until the predict method
is called
- avg_predictions: Average predictions calculated from the predictions of the constituent regressors.
"""
def __init__(self, regressors=None, regressor_names=None, init_parameters=None, verbose=0):
"""
Initialization
:param regressors: (obj list) - Constituent regressors of AveragingRegressor
:param regressor_names: (str list) - Names of the constituent regressors
:param init_parameters: (dict list) - initialization parameters for the corresponding regressors. These
must be passed as a list of dictionaries s.t. the parameters in each index are the corresponding
                parameters for the regressor at the same index in the 'regressors' parameter. Can provide a partial
list, containing parameter dictionaries only for the first few regressors.
"""
self.params = {'regressors': regressors,
'regressor_names': regressor_names,
'init_parameters': init_parameters,
'verbose': verbose}
self.regs = regressors
self.reg_names = regressor_names
self.reg_params = init_parameters
self.verbose = verbose
self.base_predictions = None
self.avg_predictions = None
super().__init__()
super().set_params(**self.params)
# Return error if no constituent regressors are supplied
if regressors is None:
raise TypeError("Parameter 'regressors' should be a list of estimators with base scikit-learn regressor"
" methods.")
# Initialize constituent regressors with custom parameters if they are provided
if init_parameters is not None:
for i in range(len(self.reg_params)):
self.regs[i] = self.regs[i](**self.reg_params[i])
def fit(self, X_train, y_train=None):
"""
Method to fit all Regressors
:param X_train: (pandas df) - Training features
:param y_train: (pandas series) - Training target variable
:return: None
"""
print('=> Fitting AveragingRegressor:')
for i in range(len(self.regs)):
if self.verbose > 0:
print('==> Fitting %s' % self.reg_names[i])
self.regs[i].fit(X_train, y_train)
def predict(self, X_test):
"""
Method to predict target variable values. Final results are the average of all predictions
:param X_test: (pandas df) - Test features
:return: self.avg_predictions: (np.array) Average target variable predictions
"""
predictions = {}
average_predictions = np.zeros(shape=(len(X_test)), dtype=np.float64)
        add_names = self.reg_names is not None and len(self.reg_names) == len(self.regs)
for i in range(len(self.regs)):
y_pred = self.regs[i].predict(X_test)
average_predictions += y_pred
name = self.reg_names[i] if add_names else ('Regressor%i' % i)
predictions.setdefault(name, y_pred)
average_predictions /= float(len(self.regs))
predictions.setdefault('Average', average_predictions)
self.base_predictions = predictions
self.avg_predictions = average_predictions
return self.avg_predictions | gpl-3.0 | -5,591,416,426,703,481,000 | 43.680851 | 119 | 0.634675 | false | 4.237134 | false | false | false |
AlexeyKruglov/Skeinforge-fabmetheus | skeinforge_application/skeinforge_plugins/craft_plugins/multiply.py | 1 | 12265 | """
This page is in the table of contents.
The multiply plugin will take a single object and create an array of objects. It is used when you want to print a single object multiple times in a single pass.
You can also position any object using this plugin by setting the center X and center Y to the desired coordinates (0,0 for the center of the print_bed) and setting the number of rows and columns to 1 (effectively setting a 1x1 matrix - printing only a single object).
The multiply manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Multiply
Besides using the multiply tool, another way of printing many copies of the model is to duplicate the model in Art of Illusion, however many times you want, with the appropriate offsets. Then you can either use the Join Objects script in the scripts submenu to create a combined shape or you can export the whole scene as an xml file, which skeinforge can then slice.
==Operation==
The default 'Activate Multiply' checkbox is on. When it is on, the functions described below will work, when it is off, nothing will be done.
==Settings==
===Center===
Default is the origin.
The center of the shape will be moved to the "Center X" and "Center Y" coordinates.
====Center X====
====Center Y====
===Number of Cells===
====Number of Columns====
Default is one.
Defines the number of columns in the array table.
====Number of Rows====
Default is one.
Defines the number of rows in the table.
===Reverse Sequence every Odd Layer===
Default is off.
When selected the build sequence will be reversed on every odd layer so that the tool will travel less. The problem is that the builds would be made with different amount of time to cool, so some would be too hot and some too cold, which is why the default is off.
===Separation over Perimeter Width===
Default is fifteen.
Defines the ratio of separation between the shape copies over the edge width.
==Examples==
The following examples multiply the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and multiply.py.
> python multiply.py
This brings up the multiply dialog.
> python multiply.py Screw Holder Bottom.stl
The multiply tool is parsing the file:
Screw Holder Bottom.stl
..
The multiply tool has created the file:
.. Screw Holder Bottom_multiply.gcode
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import intercircle
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import math
import sys
__author__ = 'Enrique Perez ([email protected])'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedText(fileName, text='', repository=None):
'Multiply the fill file or text.'
return getCraftedTextFromText(archive.getTextIfEmpty(fileName, text), repository)
def getCraftedTextFromText(gcodeText, repository=None):
'Multiply the fill text.'
if gcodec.isProcedureDoneOrFileIsEmpty(gcodeText, 'multiply'):
return gcodeText
if repository == None:
repository = settings.getReadRepository(MultiplyRepository())
if not repository.activateMultiply.value:
return gcodeText
return MultiplySkein().getCraftedGcode(gcodeText, repository)
def getNewRepository():
'Get new repository.'
return MultiplyRepository()
def writeOutput(fileName, shouldAnalyze=True):
'Multiply a gcode linear move file.'
skeinforge_craft.writeChainTextWithNounMessage(fileName, 'multiply', shouldAnalyze)
class MultiplyRepository:
'A class to handle the multiply settings.'
def __init__(self):
'Set the default settings, execute title & settings fileName.'
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.multiply.html', self )
self.fileNameInput = settings.FileNameInput().getFromFileName(
fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Multiply', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Multiply')
self.activateMultiply = settings.BooleanSetting().getFromValue('Activate Multiply', self, False)
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Center -', self )
self.centerX = settings.FloatSpin().getFromValue(-100.0, 'Center X (mm):', self, 100.0, 0.0)
self.centerY = settings.FloatSpin().getFromValue(-100.0, 'Center Y (mm):', self, 100.0, 0.0)
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Number of Cells -', self)
self.numberOfColumns = settings.IntSpin().getFromValue(1, 'Number of Columns (integer):', self, 10, 1)
self.numberOfRows = settings.IntSpin().getFromValue(1, 'Number of Rows (integer):', self, 10, 1)
settings.LabelSeparator().getFromRepository(self)
self.reverseSequenceEveryOddLayer = settings.BooleanSetting().getFromValue('Reverse Sequence every Odd Layer', self, False)
self.separationOverEdgeWidth = settings.FloatSpin().getFromValue(5.0, 'Separation over Perimeter Width (ratio):', self, 25.0, 15.0)
self.executeTitle = 'Multiply'
def execute(self):
'Multiply button has been clicked.'
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(
self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
class MultiplySkein:
'A class to multiply a skein of extrusions.'
def __init__(self):
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.isExtrusionActive = False
self.layerIndex = 0
self.layerLines = []
self.lineIndex = 0
self.lines = None
self.oldLocation = None
self.rowIndex = 0
self.shouldAccumulate = True
def addElement(self, offset):
'Add moved element to the output.'
for line in self.layerLines:
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == '(<boundaryPoint>':
movedLocation = self.getMovedLocationSetOldLocation(offset, splitLine)
line = self.distanceFeedRate.getBoundaryLine(movedLocation)
elif firstWord == 'G1':
movedLocation = self.getMovedLocationSetOldLocation(offset, splitLine)
line = self.distanceFeedRate.getLinearGcodeMovement(movedLocation.dropAxis(), movedLocation.z)
elif firstWord == '(<infillPoint>':
movedLocation = self.getMovedLocationSetOldLocation(offset, splitLine)
line = self.distanceFeedRate.getInfillBoundaryLine(movedLocation)
self.distanceFeedRate.addLine(line)
def addLayer(self):
'Add multiplied layer to the output.'
self.addRemoveThroughLayer()
if not self.repository.reverseSequenceEveryOddLayer.value:
self.rowIndex = 0
for rowIndex in xrange(self.repository.numberOfRows.value):
yRowOffset = float(rowIndex) * self.extentPlusSeparation.imag
if self.layerIndex % 2 == 1 and self.repository.reverseSequenceEveryOddLayer.value:
yRowOffset = self.arrayExtent.imag - yRowOffset
for columnIndex in xrange(self.repository.numberOfColumns.value):
xColumnOffset = float(columnIndex) * self.extentPlusSeparation.real
if self.rowIndex % 2 == 1:
xColumnOffset = self.arrayExtent.real - xColumnOffset
self.addElement(complex(xColumnOffset, yRowOffset) + self.offset)
self.rowIndex += 1
settings.printProgress(self.layerIndex, 'multiply')
if len(self.layerLines) > 1:
self.layerIndex += 1
self.layerLines = []
def addRemoveThroughLayer(self):
'Parse gcode initialization and store the parameters.'
for layerLineIndex in xrange(len(self.layerLines)):
line = self.layerLines[layerLineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.addLine(line)
if firstWord == '(<layer>':
self.layerLines = self.layerLines[layerLineIndex + 1 :]
return
def getCraftedGcode(self, gcodeText, repository):
'Parse gcode text and store the multiply gcode.'
self.centerOffset = complex(repository.centerX.value, repository.centerY.value)
self.repository = repository
self.numberOfColumns = repository.numberOfColumns.value
self.numberOfRows = repository.numberOfRows.value
self.lines = archive.getTextLines(gcodeText)
self.parseInitialization()
self.setCorners()
for line in self.lines[self.lineIndex :]:
self.parseLine(line)
return self.distanceFeedRate.output.getvalue()
def getMovedLocationSetOldLocation(self, offset, splitLine):
'Get the moved location and set the old location.'
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
self.oldLocation = location
return Vector3(location.x + offset.real, location.y + offset.imag, location.z)
def parseInitialization(self):
'Parse gcode initialization and store the parameters.'
for self.lineIndex in xrange(len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
if firstWord == '(</extruderInitialization>)':
self.distanceFeedRate.addTagBracketedProcedure('multiply')
self.distanceFeedRate.addLine(line)
self.lineIndex += 1
return
elif firstWord == '(<edgeWidth>':
self.absoluteEdgeWidth = abs(float(splitLine[1]))
self.distanceFeedRate.addLine(line)
def parseLine(self, line):
'Parse a gcode line and add it to the multiply skein.'
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if firstWord == '(</layer>)':
self.addLayer()
self.distanceFeedRate.addLine(line)
return
elif firstWord == '(</crafting>)':
self.shouldAccumulate = False
if self.shouldAccumulate:
self.layerLines.append(line)
return
self.distanceFeedRate.addLine(line)
def setCorners(self):
'Set maximum and minimum corners and z.'
cornerMaximumComplex = complex(-987654321.0, -987654321.0)
cornerMinimumComplex = -cornerMaximumComplex
for line in self.lines[self.lineIndex :]:
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == 'G1':
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
if self.isExtrusionActive:
locationComplex = location.dropAxis()
cornerMaximumComplex = euclidean.getMaximum(locationComplex, cornerMaximumComplex)
cornerMinimumComplex = euclidean.getMinimum(locationComplex, cornerMinimumComplex)
self.oldLocation = location
elif firstWord == 'M101':
self.isExtrusionActive = True
elif firstWord == 'M103':
self.isExtrusionActive = False
self.extent = cornerMaximumComplex - cornerMinimumComplex
self.shapeCenter = 0.5 * (cornerMaximumComplex + cornerMinimumComplex)
self.separation = self.repository.separationOverEdgeWidth.value * self.absoluteEdgeWidth
self.extentPlusSeparation = self.extent + complex(self.separation, self.separation)
columnsMinusOne = self.numberOfColumns - 1
rowsMinusOne = self.numberOfRows - 1
self.arrayExtent = complex(self.extentPlusSeparation.real * columnsMinusOne, self.extentPlusSeparation.imag * rowsMinusOne)
self.arrayCenter = 0.5 * self.arrayExtent
self.offset = self.centerOffset - self.arrayCenter - self.shapeCenter
def main():
'Display the multiply dialog.'
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == '__main__':
main()
| agpl-3.0 | 3,508,609,802,829,658,000 | 41.884615 | 368 | 0.76967 | false | 3.461756 | false | false | false |
QQuick/Transcrypt | transcrypt/development/automated_tests/transcrypt/module_itertools/__init__.py | 1 | 2899 | from itertools import *
from math import pow
def fibonacci():
a, b = 0, 1
for i in range (10):
yield a
a, b = b, a + b
squares = [i * i for i in range (10)]
chars = 'thequickbrownfoxjumpsoverthelazydog'
def run (autoTester):
autoTester.check ('islice count', list (islice (count (10, 2), 4, 40, 3)))
autoTester.check ('islice cycle', list (islice (cycle (fibonacci ()), 15)))
autoTester.check ('repeat', list (repeat (3.14, 15)))
autoTester.check ('islice repeat', list (islice (repeat (2.74), 15)))
autoTester.check ('accumulate', list (accumulate (range (5))))
def add (total, element):
return total + element
autoTester.check ('accumulate', list (accumulate (['alamak', 'mirach', 'sirrah'], add)))
autoTester.check ('chain', list (chain (fibonacci (), squares, chars)))
autoTester.check ('chain.from_iterable', list (chain.from_iterable (['ape', 'node', 'mice', 'vim', 'sus', 'jet'])))
selectors = [True, True, False, True, False, False, True, True, False, True]
autoTester.check ('compress', list (compress (
['{}{}'.format (('take' if selector else 'leave'), index) for index, selector in enumerate (selectors)],
selectors
)))
autoTester.check ('dropwhile', list (dropwhile (lambda x: x < 5, [1, 4, 6, 4, 1])))
autoTester.check ('filterfalse', list (filterfalse (lambda x: x % 2, range (10))))
things = [('animal', 'bear'), ('animal', 'duck'), ('plant', 'cactus'), ('vehicle', 'speed boat'), ('vehicle', 'school bus')]
for key, group in groupby (things, lambda x: x [0]):
for thing in group:
autoTester.check ('A {} is a {}.' .format (thing[1], key))
autoTester.check (' ')
autoTester.check ('islice', list (islice ([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 2, 9, 2)))
autoTester.check ('starmap', [int (x) for x in starmap (pow, [(2, 5), (3, 2), (10, 3)])])
autoTester.check ('takewhile', list (takewhile (lambda x: x < 5, [1, 4, 6, 4, 1])))
i1, i2 = tee (islice (count (), 5))
autoTester.check ('tee', list (i1), list (i1), list (i2))
autoTester.check ('product', list (product ('ABCD', 'xy')), list (product (range (2), repeat = 3)))
autoTester.check ('permutations', list (permutations ('ABCD')), list (permutations ('ABCD', 2)))
autoTester.check ('combinations',
list (combinations ('ABCD', 2)),
list (combinations ([1, 2, 3, 4, 5], 3)),
list (combinations (islice (count (), 6), 4))
)
autoTester.check ('combinations_with_replacement',
list (combinations_with_replacement ('ABCD', 2)),
list (combinations_with_replacement ([1, 2, 3, 4, 5], 3)),
list (combinations_with_replacement (islice (count (), 6), 4))
)
| apache-2.0 | 6,306,938,173,265,230,000 | 40.043478 | 128 | 0.563643 | false | 3.455304 | true | false | false |
talha81/TACTIC-DEV | src/tactic/ui/table/sobject_detail_wdg.py | 1 | 4954 | ###########################################################
#
# Copyright (c) 2005-2008, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['SObjectDetailElementWdg']
from pyasm.common import Environment
from pyasm.search import SearchKey
from pyasm.web import DivWdg
from pyasm.widget import IconWdg
from tactic.ui.widget import IconButtonWdg
from tactic.ui.common import BaseTableElementWdg
class SObjectDetailElementWdg(BaseTableElementWdg):
'''The element widget that displays according to type'''
ARGS_KEYS = {
'use_parent': {
'description': 'Display the parent of this sobject for the detail',
'type': 'SelectWdg',
'values': 'true|false',
'category': 'Options'
}
}
def __init__(my, **kwargs):
my.widget = None
super(SObjectDetailElementWdg, my).__init__(**kwargs)
def set_widget(my, widget):
my.widget = widget
def get_width(my):
return 50
def get_display(my):
sobject = my.get_current_sobject()
use_parent = my.get_option("use_parent")
use_parent = use_parent in ['true', True]
#if use_parent in ['true', True]:
# sobject = sobject.get_parent()
# if not sobject:
# return DivWdg()
my.search_key = SearchKey.get_by_sobject(sobject)
div = DivWdg()
div.add_class("hand")
#div.add_style("width: 100%")
#div.add_style("height: 100%")
target_id = "main_body"
title = "Show Item Details"
if my.widget:
widget = my.widget
else:
widget = IconButtonWdg(title=title, icon=IconWdg.ZOOM)
code = sobject.get_code()
widget.add_behavior( {
'type': 'click_up',
'search_key': my.search_key,
'use_parent': use_parent,
'code': code,
'cbjs_action': '''
spt.tab.set_main_body_tab();
var class_name = 'tactic.ui.tools.SObjectDetailWdg';
var kwargs = {
search_key: bvr.search_key,
use_parent: bvr.use_parent
};
var mode = 'xxx';
var layout = bvr.src_el.getParent(".spt_tool_top");
if (layout != null) {
mode = 'tool'
}
if (mode == 'tool') {
spt.app_busy.show("Loading ...");
var layout = bvr.src_el.getParent(".spt_tool_top");
var element = layout.getElement(".spt_tool_content");
spt.panel.load(element, class_name, kwargs);
spt.app_busy.hide();
}
else {
var element_name = "detail_"+bvr.code;
var title = "Detail ["+bvr.code+"]";
spt.tab.add_new(element_name, title, class_name, kwargs);
}
'''
} )
#link_wdg = my.get_link_wdg(target_id, title, widget)
#div.add( link_wdg )
div.add(widget)
return div
"""
def get_link_wdg(my, target_id, title, widget=None):
sobject = my.get_current_sobject()
path = "/%s" % my.search_key
options = {
'path': path,
'class_name': 'tactic.ui.panel.SObjectPanelWdg',
#'class_name': 'tactic.ui.panel.SearchTypePanelWdg',
'search_key': my.search_key
}
security = Environment.get_security()
if not security.check_access("url", path, "view"):
return
options['path'] = path
view_link_wdg = DivWdg(css="hand")
view_link_wdg.add_style( "padding-top: 5px" )
if widget:
view_link_wdg.add(widget)
else:
view_link_wdg.add(title)
# put in a default class name
if not options.get('class_name'):
options['class_name'] = "tactic.ui.panel.ViewPanelWdg"
# put in a default search
if not options.get('filters'):
options['filters'] = '0';
behavior = {
'type': 'click_up',
'cbfn_action': 'spt.side_bar.display_link_cbk',
'target_id': target_id,
'is_popup': 'true',
'options': options,
}
view_link_wdg.add_behavior( behavior )
# use shift click to open up in a popup
behavior = {
'type': 'click_up',
'mouse_btn': 'LMB',
'modkeys': 'SHIFT',
'cbfn_action': 'spt.side_bar.display_link_cbk',
'target_id': target_id, # FIXME: has to be here for now
'title': sobject.get_code(),
'is_popup': 'false',
'options': options,
}
view_link_wdg.add_behavior( behavior )
return view_link_wdg
"""
| epl-1.0 | 6,152,893,046,757,104,000 | 26.21978 | 75 | 0.525838 | false | 3.6187 | false | false | false |
JungeAlexander/cocoscore | setup.py | 1 | 2522 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
with io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
) as fh:
return fh.read()
setup(
name='cocoscore',
version='1.0.0',
license='MIT license',
description='CoCoScore: context-aware co-occurrence scores for text mining applications',
long_description='%s\n%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
),
author='Alexander Junge',
author_email='[email protected]',
url='https://github.com/JungeAlexander/cocoscore',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
# uncomment if you test on these interpreters:
# 'Programming Language :: Python :: Implementation :: IronPython',
# 'Programming Language :: Python :: Implementation :: Jython',
# 'Programming Language :: Python :: Implementation :: Stackless',
'Topic :: Utilities',
],
keywords=[
# eg: 'keyword1', 'keyword2', 'keyword3',
],
install_requires=[
# eg: 'aspectlib==1.1.1', 'six>=1.7',
'pandas>=0.23.3',
'scikit-learn>=0.20.1',
],
extras_require={
# eg:
# 'rst': ['docutils>=0.11'],
# ':python_version=="2.6"': ['argparse'],
},
)
| mit | -8,443,466,161,578,863,000 | 32.626667 | 96 | 0.599524 | false | 3.781109 | false | false | false |
MikhailMS/Final_Project | music_generation_module/output_to_input.py | 1 | 1066 | # Import packages
import theano, theano.tensor as T
import numpy as np
# Import modules
from model_data import noteStateSingleToInputForm
# Main class
class OutputFormToInputFormOp(theano.Op):
# Properties attribute
__props__ = ()
def make_node(self, state, time):
state = T.as_tensor_variable(state)
time = T.as_tensor_variable(time)
"""Assumably there should be third variable that holds extra params
extra = T.as_tensor_variable(extra)
return theano.Apply(self, [state, time, extra], [T.bmatrix()])
"""
return theano.Apply(self, [state, time], [T.bmatrix()])
# Python implementation:
def perform(self, node, inputs_storage, output_storage):
state, time = inputs_storage
output_storage[0][0] = np.array(noteStateSingleToInputForm(state, time), dtype='int8')
"""Taking third parameter into account:
state, time, extra = inputs_storage
output_storage[0][0][0] = np.array(noteStateSingleToInputForm(state, time, extra), dtype='int8')
"""
| bsd-2-clause | 1,084,220,755,746,954,400 | 34.533333 | 104 | 0.660413 | false | 3.753521 | false | false | false |
Ophiuchus1312/enigma2-master | lib/python/Components/RcModel.py | 1 | 6258 | from enigma import getBoxType
from Tools.StbHardware import getFPVersion
import os
class RcModel:
RCTYPE_DMM = 0
RCTYPE_DMM1 = 1
RCTYPE_DMM2 = 2
RCTYPE_E3HD = 3
RCTYPE_EBOX5000 = 4
RCTYPE_ET4X00 = 5
RCTYPE_ET6X00 = 6
RCTYPE_ET6500 = 7
RCTYPE_ET9X00 = 8
RCTYPE_ET9500 = 9
RCTYPE_GB = 10
RCTYPE_INI0 = 11
RCTYPE_INI1 = 12
RCTYPE_INI2 = 13
RCTYPE_INI3 = 14
RCTYPE_IQON = 15
RCTYPE_IXUSSONE = 16
RCTYPE_IXUSSZERO = 17
RCTYPE_ODINM7 = 18
RCTYPE_ODINM9 = 19
RCTYPE_TM = 20
RCTYPE_VU = 21
RCTYPE_VU2 = 22
RCTYPE_XP1000 = 23
def __init__(self):
self.currentRcType = self.RCTYPE_DMM
self.readRcTypeFromProc()
def rcIsDefault(self):
if self.currentRcType != self.RCTYPE_DMM:
return False
return True
def readFile(self, target):
fp = open(target, 'r')
out = fp.read()
fp.close()
return out.split()[0]
def readRcTypeFromProc(self):
if os.path.exists('/proc/stb/info/hwmodel'):
model = self.readFile('/proc/stb/info/hwmodel')
if model == 'tmtwinoe' or model == 'tm2toe' or model == 'tmsingle' or model == 'tmnanooe':
self.currentRcType = self.RCTYPE_TM
elif model == 'ios100hd' or model == 'ios200hd' or model == 'ios300hd':
self.currentRcType = self.RCTYPE_IQON
elif getBoxType() == 'dm8000':
self.currentRcType = self.RCTYPE_DMM
elif getBoxType() == 'dm7020hd':
self.currentRcType = self.RCTYPE_DMM2
elif getBoxType() == 'dm800' or getBoxType() == 'dm800se' or getBoxType() == 'dm500hd':
self.currentRcType = self.RCTYPE_DMM1
elif os.path.exists('/proc/stb/info/boxtype'):
model = self.readFile('/proc/stb/info/boxtype')
if model.startswith('et') or model.startswith('xp'):
rc = self.readFile('/proc/stb/ir/rc/type')
if rc == '3':
self.currentRcType = self.RCTYPE_ODINM9
if rc == '4':
self.currentRcType = self.RCTYPE_DMM
elif rc == '5':
self.currentRcType = self.RCTYPE_ET9X00
elif rc == '6':
self.currentRcType = self.RCTYPE_DMM
elif rc == '7':
self.currentRcType = self.RCTYPE_ET6X00
elif rc == '8':
self.currentRcType = self.RCTYPE_VU
elif rc == '9' and model == 'et9500':
self.currentRcType = self.RCTYPE_ET9500
elif rc == '9' and model == 'et6500':
self.currentRcType = self.RCTYPE_ET6500
elif rc == '11' and model == 'et9200':
self.currentRcType = self.RCTYPE_ET9500
elif rc == '11' and model == 'et9000':
					self.currentRcType = self.RCTYPE_ET9X00
elif rc == '13' and model == 'et4000':
self.currentRcType = self.RCTYPE_ET4X00
elif rc == '14':
self.currentRcType = self.RCTYPE_XP1000
elif model == 'ebox5000' or model == 'ebox5100' or model == 'ebox7358':
self.currentRcType = self.RCTYPE_EBOX5000
elif model == 'gigablue':
self.currentRcType = self.RCTYPE_GB
elif model == 'ini-3000':
fp_version = str(getFPVersion())
if fp_version.startswith('1'):
self.currentRcType = self.RCTYPE_INI0
else:
self.currentRcType = self.RCTYPE_INI2
elif model == 'ini-5000' or model == 'ini-7000' or model == 'ini-7012':
self.currentRcType = self.RCTYPE_INI1
elif model == 'ini-1000' or model == 'ini-5000R':
self.currentRcType = self.RCTYPE_INI2
elif model == 'ini-5000sv':
self.currentRcType = self.RCTYPE_INI3
elif model == 'e3hd':
self.currentRcType = self.RCTYPE_E3HD
elif model == 'odinm9':
self.currentRcType = self.RCTYPE_ODINM9
elif model == 'odinm7':
self.currentRcType = self.RCTYPE_ODINM7
elif model.startswith('Ixuss'):
if getBoxType() == 'ixussone':
self.currentRcType = self.RCTYPE_IXUSSONE
elif getBoxType() == 'ixusszero':
self.currentRcType = self.RCTYPE_IXUSSZERO
elif os.path.exists('/proc/stb/info/vumodel'):
model = self.readFile('/proc/stb/info/vumodel')
if model == 'ultimo':
self.currentRcType = self.RCTYPE_VU2
else:
self.currentRcType = self.RCTYPE_VU
def getRcLocation(self):
if self.currentRcType == self.RCTYPE_DMM:
return '/usr/share/enigma2/rc_models/dmm0/'
elif self.currentRcType == self.RCTYPE_DMM1:
return '/usr/share/enigma2/rc_models/dmm1/'
elif self.currentRcType == self.RCTYPE_DMM2:
return '/usr/share/enigma2/rc_models/dmm2/'
elif self.currentRcType == self.RCTYPE_E3HD:
return '/usr/share/enigma2/rc_models/e3hd/'
elif self.currentRcType == self.RCTYPE_EBOX5000:
return '/usr/share/enigma2/rc_models/ebox5000/'
elif self.currentRcType == self.RCTYPE_ET4X00:
return '/usr/share/enigma2/rc_models/et4x00/'
elif self.currentRcType == self.RCTYPE_ET6X00:
return '/usr/share/enigma2/rc_models/et6x00/'
elif self.currentRcType == self.RCTYPE_ET6500:
return '/usr/share/enigma2/rc_models/et6500/'
elif self.currentRcType == self.RCTYPE_ET9X00:
return '/usr/share/enigma2/rc_models/et9x00/'
elif self.currentRcType == self.RCTYPE_ET9500:
return '/usr/share/enigma2/rc_models/et9500/'
elif self.currentRcType == self.RCTYPE_GB:
return '/usr/share/enigma2/rc_models/gb/'
elif self.currentRcType == self.RCTYPE_INI0:
return '/usr/share/enigma2/rc_models/ini0/'
elif self.currentRcType == self.RCTYPE_INI1:
return '/usr/share/enigma2/rc_models/ini1/'
elif self.currentRcType == self.RCTYPE_INI2:
return '/usr/share/enigma2/rc_models/ini2/'
		elif self.currentRcType == self.RCTYPE_INI3:
return '/usr/share/enigma2/rc_models/ini3/'
elif self.currentRcType == self.RCTYPE_IQON:
return '/usr/share/enigma2/rc_models/iqon/'
elif self.currentRcType == self.RCTYPE_IXUSSONE:
return '/usr/share/enigma2/rc_models/ixussone/'
elif self.currentRcType == self.RCTYPE_IXUSSZERO:
return '/usr/share/enigma2/rc_models/ixusszero/'
elif self.currentRcType == self.RCTYPE_ODINM9:
return '/usr/share/enigma2/rc_models/odinm9/'
elif self.currentRcType == self.RCTYPE_ODINM7:
return '/usr/share/enigma2/rc_models/odinm7/'
elif self.currentRcType == self.RCTYPE_TM:
return '/usr/share/enigma2/rc_models/tm/'
elif self.currentRcType == self.RCTYPE_VU:
return '/usr/share/enigma2/rc_models/vu/'
elif self.currentRcType == self.RCTYPE_VU2:
return '/usr/share/enigma2/rc_models/vu2/'
elif self.currentRcType == self.RCTYPE_XP1000:
return '/usr/share/enigma2/rc_models/xp1000/'
rc_model = RcModel()
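
# Illustrative usage (not part of the original module): callers typically ask the
# shared rc_model instance where the artwork for the detected remote control
# lives and load images from that directory. The import path and file name below
# are assumptions, not taken from this file:
#
#     from Components.RcModel import rc_model
#     rc_png = rc_model.getRcLocation() + 'rc.png'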
| gpl-2.0 | 3,291,528,724,807,571,000 | 35.383721 | 93 | 0.683605 | false | 2.555329 | false | false | false |
jfly/libtnoodle | tools/c_to_emscripten.py | 1 | 3643 | #!/usr/bin/env python2
import json
import argparse
import collections
import clang.cindex
from clang.cindex import TypeKind
from clang.cindex import CursorKind
Function = collections.namedtuple('Function', ['name', 'returnType', 'argumentTypes'])
Constant = collections.namedtuple('Constant', ['name', 'value'])
def getJavascriptType(t):
if t.kind == TypeKind.TYPEDEF:
return getJavascriptType(t.get_canonical())
elif t.kind == TypeKind.POINTER:
pointee = t.get_pointee()
if pointee.kind == TypeKind.CHAR_S:
return "string"
else:
assert False # unrecognized pointer type
elif t.kind in [ TypeKind.INT, TypeKind.UINT, TypeKind.LONG ]:
return "number"
else:
assert False # unrecognized type
def getFunctionsAndConstants(node, filename):
if node.kind == CursorKind.FUNCTION_DECL:
args = []
for arg in node.get_arguments():
jsType = getJavascriptType(arg.type)
args.append(jsType)
jsReturnType = getJavascriptType(node.result_type)
return [ Function( node.spelling, jsReturnType, args ) ], []
elif node.kind == CursorKind.MACRO_DEFINITION:
if node.location.file is not None and node.location.file.name == filename:
tokens = list(node.get_tokens())
# We're only interested in stuff like
# #define PI 3.14
# not
# #define CNOODLE_H
if len(tokens) == 3:
identifier, literal, hsh = tokens
return [], [ Constant(identifier.spelling, literal.spelling) ]
# Recurse for children of this node
funcs = []
consts = []
for c in node.get_children():
fs, cs = getFunctionsAndConstants(c, filename)
funcs += fs
consts += cs
return funcs, consts
def main():
parser = argparse.ArgumentParser(description='Produce Emscripten wrapper code for a C header file.')
parser.add_argument('file', type=argparse.FileType('r'), help='C header file to parse')
parser.add_argument('action', choices=[ "exported", "cwrap" ])
args = parser.parse_args()
index = clang.cindex.Index.create()
tu = index.parse(args.file.name, options=clang.cindex.TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD)
funcs, consts = getFunctionsAndConstants(tu.cursor, args.file.name)
if args.action == "cwrap":
prefixes = set()
js = ""
for f in funcs:
prefix, shortName = f.name.split("_", 1)
prefixes.add(prefix)
funcData = {}
funcData['prefix'] = prefix
funcData['shortName'] = shortName
funcData['name'] = f.name
funcData['returnType'] = f.returnType
funcData['argumentTypes'] = json.dumps(f.argumentTypes)
js += '{prefix}.{shortName} = Module.cwrap("{name}", "{returnType}", {argumentTypes});\n'.format(**funcData)
for c in consts:
prefix, shortName = c.name.split("_", 1)
prefix = prefix.lower()
constData = {
'prefix': prefix,
'shortName': shortName,
'value': c.value,
}
js += "{prefix}.{shortName} = {value};\n".format(**constData)
for prefix in prefixes:
js = "var {0} = {0} || {{}};\n".format(prefix) + js
print js,
elif args.action == "exported":
funcNames = [ "_%s" % f.name for f in funcs ]
exported = 'EXPORTED_FUNCTIONS=%s' % json.dumps( funcNames )
print exported
else:
assert False
if __name__ == "__main__":
main()
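
# Illustrative invocation (not part of the original script). For a hypothetical
# header cnoodle.h that declares `char *cnoodle_solve(char *scramble)`, the two
# actions would print roughly:
#
#     $ python2 c_to_emscripten.py cnoodle.h cwrap
#     var cnoodle = cnoodle || {};
#     cnoodle.solve = Module.cwrap("cnoodle_solve", "string", ["string"]);
#
#     $ python2 c_to_emscripten.py cnoodle.h exported
#     EXPORTED_FUNCTIONS=["_cnoodle_solve"]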
| gpl-3.0 | -3,406,530,499,207,695,000 | 36.556701 | 120 | 0.591545 | false | 3.896257 | false | false | false |
eyeseast/django-scrivo | scrivo/tests/views.py | 1 | 3316 | import datetime
import os
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from scrivo.models import Post
from scrivo.settings import DEFAULT_PAGINATE_BY, INDEX_POST_COUNT
from scrivo.tests.base import BlogPostTest, f
class PostViewTest(BlogPostTest):
def setUp(self):
self.user = self.create_user()
# let's make 100 fake posts
self.date_range = []
date = datetime.datetime(2011, 1, 1)
for i in range(100):
self.create_post(
title="Test %s" % i,
author = self.user,
published = date,
status = Post.STATUS.public
)
# incriment the date
self.date_range.append(date)
date += datetime.timedelta(days=1)
def test_archive_index(self):
response = self.client.get(reverse('scrivo_archive_index'))
self.assertEqual(response.status_code, 200)
posts = response.context['object_list']
if not posts:
self.fail("No posts in context")
self.assertEqual(posts.count(), INDEX_POST_COUNT)
def test_year_archive(self):
response = self.client.get(reverse('scrivo_year_archive', args=[2011]))
self.assertEqual(response.status_code, 200)
posts = response.context['object_list']
if not posts:
self.fail("No posts in context")
paginator = response.context['paginator']
if not paginator:
self.fail("Not paginated")
# check that we're paginating right
self.assertEqual(posts.count(), DEFAULT_PAGINATE_BY)
# and that we have the right total
self.assertEqual(paginator.count, 100)
def test_month_archive(self):
response = self.client.get(reverse('scrivo_month_archive', args=[2011, 'jan']))
self.assertEqual(response.status_code, 200)
posts = response.context['object_list']
if not posts:
self.fail("No posts in context")
paginator = response.context['paginator']
if not paginator:
self.fail("Not paginated")
self.assertEqual(len(posts), DEFAULT_PAGINATE_BY)
self.assertEqual(paginator.count, 31) # for january
def test_day_archive(self):
response = self.client.get(reverse('scrivo_day_archive', args=[2011, 'jan', 5]))
self.assertEqual(response.status_code, 200)
posts = response.context['object_list']
if not posts:
self.fail("No posts in context")
# since we're doing one post per day
self.assertEqual(len(posts), 1)
def test_post_details(self):
"""
Loop through posts and test that we have a valid view
for each day, and that everything works. Since every post
should be public, we should be able to use Post.objects.all()
"""
for post in Post.objects.all():
response = self.client.get(post.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertEqual(post, response.context['post'])
| mit | -8,118,935,701,026,106,000 | 32.846939 | 88 | 0.587455 | false | 4.273196 | true | false | false |
theonlydude/RandomMetroidSolver | rando/ItemLocContainer.py | 1 | 10007 |
import copy, utils.log
from logic.smbool import SMBool, smboolFalse
from logic.smboolmanager import SMBoolManager
from collections import Counter
class ItemLocation(object):
__slots__ = ( 'Item', 'Location', 'Accessible' )
def __init__(self, Item=None, Location=None, accessible=True):
self.Item = Item
self.Location = Location
self.Accessible = accessible
def json(self):
return {'Item': self.Item.json(), 'Location': self.Location.json()}
def getItemListStr(items):
return str(dict(Counter(["%s/%s" % (item.Type,item.Class) for item in items])))
def getLocListStr(locs):
return str([loc.Name for loc in locs])
def getItemLocStr(itemLoc):
return itemLoc.Item.Type + " at " + itemLoc.Location.Name
def getItemLocationsStr(itemLocations):
return str([getItemLocStr(il) for il in itemLocations])
class ContainerSoftBackup(object):
def __init__(self, container):
self.itemLocations = container.itemLocations[:]
self.itemPool = container.itemPool[:]
self.unusedLocations = container.unusedLocations[:]
self.currentItems = container.currentItems[:]
def restore(self, container, resetSM=True):
# avoid costly deep copies of locations
container.itemLocations = self.itemLocations[:]
container.itemPool = self.itemPool[:]
container.unusedLocations = self.unusedLocations[:]
container.currentItems = self.currentItems[:]
if resetSM:
container.sm.resetItems()
container.sm.addItems([it.Type for it in container.currentItems])
# Holds items yet to place (itemPool), locations yet to fill (unusedLocations),
# placed items/locations (itemLocations).
# If logic is needed, also holds a SMBoolManager (sm) and collected items so far
# (currentItems)
class ItemLocContainer(object):
def __init__(self, sm, itemPool, locations):
self.sm = sm
self.itemLocations = []
self.unusedLocations = locations
self.currentItems = []
self.itemPool = itemPool
self.itemPoolBackup = None
self.unrestrictedItems = set()
self.log = utils.log.get('ItemLocContainer')
self.checkConsistency()
def checkConsistency(self):
assert len(self.unusedLocations) == len(self.itemPool), "Item({})/Locs({}) count mismatch".format(len(self.itemPool), len(self.unusedLocations))
def __eq__(self, rhs):
eq = self.currentItems == rhs.currentItems
eq &= getLocListStr(self.unusedLocations) == getLocListStr(rhs.unusedLocations)
eq &= self.itemPool == rhs.itemPool
eq &= getItemLocationsStr(self.itemLocations) == getItemLocationsStr(rhs.itemLocations)
return eq
def __copy__(self):
locs = copy.copy(self.unusedLocations)
# we don't copy restriction state on purpose: it depends on
# outside context we don't want to bring to the copy
ret = ItemLocContainer(SMBoolManager(),
self.itemPoolBackup[:] if self.itemPoolBackup != None else self.itemPool[:],
locs)
ret.currentItems = self.currentItems[:]
ret.unrestrictedItems = copy.copy(self.unrestrictedItems)
ret.itemLocations = [ ItemLocation(
il.Item,
copy.copy(il.Location)
) for il in self.itemLocations ]
ret.sm.addItems([item.Type for item in ret.currentItems])
return ret
# create a new container based on slice predicates on items and
# locs. both predicates must result in a consistent container
# (same number of unused locations and not placed items)
def slice(self, itemPoolCond, locPoolCond):
assert self.itemPoolBackup is None, "Cannot slice a constrained container"
locs = self.getLocs(locPoolCond)
items = self.getItems(itemPoolCond)
cont = ItemLocContainer(self.sm, items, locs)
cont.currentItems = self.currentItems
cont.itemLocations = self.itemLocations
return copy.copy(cont)
# transfer collected items/locations to another container
def transferCollected(self, dest):
dest.currentItems = self.currentItems[:]
dest.sm = SMBoolManager()
dest.sm.addItems([item.Type for item in dest.currentItems])
dest.itemLocations = copy.copy(self.itemLocations)
dest.unrestrictedItems = copy.copy(self.unrestrictedItems)
# reset collected items/locations. if reassignItemLocs is True,
# will re-fill itemPool and unusedLocations as they were before
# collection
def resetCollected(self, reassignItemLocs=False):
self.currentItems = []
if reassignItemLocs == False:
self.itemLocations = []
else:
while len(self.itemLocations) > 0:
il = self.itemLocations.pop()
self.itemPool.append(il.Item)
self.unusedLocations.append(il.Location)
self.unrestrictedItems = set()
self.sm.resetItems()
def dump(self):
return "ItemPool(%d): %s\nLocPool(%d): %s\nCollected: %s" % (len(self.itemPool), getItemListStr(self.itemPool), len(self.unusedLocations), getLocListStr(self.unusedLocations), getItemListStr(self.currentItems))
# temporarily restrict item pool to items fulfilling predicate
def restrictItemPool(self, predicate):
assert self.itemPoolBackup is None, "Item pool already restricted"
self.itemPoolBackup = self.itemPool
self.itemPool = [item for item in self.itemPoolBackup if predicate(item)]
self.log.debug("restrictItemPool: "+getItemListStr(self.itemPool))
# remove a placed restriction
def unrestrictItemPool(self):
assert self.itemPoolBackup is not None, "No pool restriction to remove"
self.itemPool = self.itemPoolBackup
self.itemPoolBackup = None
self.log.debug("unrestrictItemPool: "+getItemListStr(self.itemPool))
def removeLocation(self, location):
if location in self.unusedLocations:
self.unusedLocations.remove(location)
def removeItem(self, item):
self.itemPool.remove(item)
if self.itemPoolBackup is not None:
self.itemPoolBackup.remove(item)
# collect an item at a location. if pickup is True, also affects logic (sm) and collectedItems
def collect(self, itemLocation, pickup=True):
item = itemLocation.Item
location = itemLocation.Location
if not location.restricted:
self.unrestrictedItems.add(item.Type)
if pickup == True:
self.currentItems.append(item)
self.sm.addItem(item.Type)
self.removeLocation(location)
self.itemLocations.append(itemLocation)
self.removeItem(item)
def isPoolEmpty(self):
return len(self.itemPool) == 0
def getNextItemInPool(self, t):
return next((item for item in self.itemPool if item.Type == t), None)
def getNextItemInPoolMatching(self, predicate):
return next((item for item in self.itemPool if predicate(item) == True), None)
def hasItemTypeInPool(self, t):
return any(item.Type == t for item in self.itemPool)
def hasItemInPool(self, predicate):
return any(predicate(item) == True for item in self.itemPool)
def hasItemCategoryInPool(self, cat):
return any(item.Category == cat for item in self.itemPool)
def getNextItemInPoolFromCategory(self, cat):
return next((item for item in self.itemPool if item.Category == cat), None)
def getAllItemsInPoolFromCategory(self, cat):
return [item for item in self.itemPool if item.Category == cat]
def countItemTypeInPool(self, t):
return sum(1 for item in self.itemPool if item.Type == t)
def countItems(self, predicate):
return sum(1 for item in self.itemPool if predicate(item) == True)
    # gets the item pool in the form of a dictionary whose keys are item types
    # and whose values are lists of items of this type
def getPoolDict(self):
poolDict = {}
for item in self.itemPool:
if item.Type not in poolDict:
poolDict[item.Type] = []
poolDict[item.Type].append(item)
return poolDict
def getLocs(self, predicate):
return [loc for loc in self.unusedLocations if predicate(loc) == True]
def getItems(self, predicate):
return [item for item in self.itemPool if predicate(item) == True]
def getUsedLocs(self, predicate):
return [il.Location for il in self.itemLocations if predicate(il.Location) == True]
def getItemLoc(self, loc):
for il in self.itemLocations:
if il.Location == loc:
return il
def getCollectedItems(self, predicate):
return [item for item in self.currentItems if predicate(item) == True]
def hasUnrestrictedLocWithItemType(self, itemType):
return itemType in self.unrestrictedItems
def getLocsForSolver(self):
locs = []
for il in self.itemLocations:
loc = il.Location
self.log.debug("getLocsForSolver: {}".format(loc.Name))
# filter out restricted locations
if loc.restricted:
self.log.debug("getLocsForSolver: restricted, remove {}".format(loc.Name))
continue
loc.itemName = il.Item.Type
locs.append(loc)
return locs
def cleanLocsAfterSolver(self):
# restricted locs can have their difficulty set, which can cause them to be reported in the
# post randomization warning message about locs with diff > max diff.
for il in self.itemLocations:
loc = il.Location
if loc.restricted and loc.difficulty == True:
loc.difficulty = smboolFalse
def getDistinctItems(self):
itemTypes = {item.Type for item in self.itemPool}
return [self.getNextItemInPool(itemType) for itemType in itemTypes]
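
# A minimal usage sketch (not part of the original module). `itemPool` and
# `locations` below stand for the randomizer's Item and Location lists, which are
# built elsewhere; they are assumptions, not defined in this file:
#
#     container = ItemLocContainer(SMBoolManager(), itemPool, locations)
#     itemLoc = ItemLocation(itemPool[0], locations[0])
#     container.collect(itemLoc)   # updates sm, currentItems and itemLocations
#     print(container.dump())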
| gpl-3.0 | 7,744,803,922,275,245,000 | 39.350806 | 218 | 0.662137 | false | 4.049777 | false | false | false |
brainwane/zulip | zerver/views/submessage.py | 2 | 1118 | import orjson
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.decorator import REQ, has_request_variables
from zerver.lib.actions import do_add_submessage
from zerver.lib.message import access_message
from zerver.lib.response import json_error, json_success
from zerver.lib.validator import check_int
from zerver.models import UserProfile
@has_request_variables
def process_submessage(request: HttpRequest,
user_profile: UserProfile,
message_id: int=REQ(validator=check_int),
msg_type: str=REQ(),
content: str=REQ(),
) -> HttpResponse:
message, user_message = access_message(user_profile, message_id)
try:
orjson.loads(content)
except Exception:
return json_error(_("Invalid json for submessage"))
do_add_submessage(
realm=user_profile.realm,
sender_id=user_profile.id,
message_id=message.id,
msg_type=msg_type,
content=content,
)
return json_success()
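
# Illustrative request sketch (not part of the original module); the URL path is
# an assumption, only the parameter names come from this view:
#
#     POST /json/submessage
#         message_id: 123
#         msg_type: "example-type"
#         content: "{\"some\": \"json\"}"   # must parse as JSON or the view errors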
| apache-2.0 | 5,286,719,226,919,771,000 | 31.882353 | 68 | 0.657424 | false | 4.234848 | false | false | false |
ox-it/moxie | moxie/places/importers/osm.py | 1 | 8808 | # -*- coding: utf-8 -*-
import logging
from xml.sax import handler
from moxie.places.importers.helpers import prepare_document, format_uk_telephone
logger = logging.getLogger(__name__)
DEFAULT_SHOP = '/amenities/shop'
SHOPS = {'supermarket': '/amenities/supermarket',
'department_store': '/amenities/supermarket', # TODO supermarket? or just shop?
'bicycle': '/amenities/shop/bicycle',
'convenience': '/amenities/supermarket/convenience',
#'hairdresser': '/amenities/shop/hairdresser', Disabled due to poor quality of data (TRELLO#144).
'book': '/amenities/shop/book',
'mall': DEFAULT_SHOP,
'deli': DEFAULT_SHOP,
'doityourself': DEFAULT_SHOP,
'newsagent': DEFAULT_SHOP
}
AMENITIES = {'atm': '/amenities/atm',
'bank': '/amenities/bank', # TODO atm=yes?
'bar': '/amenities/food-drink/bar',
'bicycle_parking': '/transport/bicycle-parking',
'cafe': '/amenities/food-drink/cafe', # TODO food=yes?
'cinema': '/leisure/cinema',
'dentist': '/amenities/health/dentist',
'doctors': '/amenities/health/doctor',
'fast_food': '/amenities/food-drink/fast-food',
'hospital': '/amenities/health/hospital',
'library': '/amenities/public-library', # TODO is it?
'parking': '/transport/car-park',
'pharmacy': '/amenities/health/pharmacy',
'post_box': '/amenities/post/post-box',
'post_office': '/amenities/post/post-office',
'pub': '/amenities/food-drink/pub', # TODO food=yes?
'punt_hire': '/leisure/punt',
'recycling': '/amenities/recycling-facility',
'restaurant': '/amenities/food-drink/restaurant',
'swimming_pool': '/leisure/swimming-pool',
'taxi': '/transport/taxi-rank',
'theatre': '/leisure/theatre',
'waste_basket': '/amenities/recycling-facility',
}
PARK_AND_RIDE = '/transport/car-park/park-and-ride'
EMERGENCIES = {'defibrillator': '/amenities/health/defibrillator',
}
class OSMHandler(handler.ContentHandler):
def __init__(self, indexer, precedence, identifier_key='identifiers'):
self.indexer = indexer
self.precedence = precedence
self.identifier_key = identifier_key
# k/v from OSM that we want to import in our "tags"
self.indexed_tags = ['cuisine', 'brand', 'brewery', 'operator']
# We only import element that have one of these key
self.element_tags = ['amenity', 'shop', 'naptan:AtcoCode', 'emergency']
self.pois = []
def startDocument(self):
self.tags = {}
self.valid_node = True
self.create_count, self.modify_count = 0, 0
self.delete_count, self.unchanged_count = 0, 0
self.ignore_count = 0
self.node_locations = {}
def startElement(self, name, attrs):
if name == 'node':
lat = float(attrs['lat'])
lon = float(attrs['lon'])
id = attrs['id']
self.node_location = lat, lon
self.attrs = attrs
self.id = id
self.tags = {}
self.node_locations[id] = lat, lon
elif name == 'tag':
self.tags[attrs['k']] = attrs['v']
elif name == 'way':
self.nodes = []
self.tags = {}
self.attrs = attrs
self.id = attrs['id']
elif name == 'nd':
self.nodes.append(attrs['ref'])
def endElement(self, element_type):
if element_type == 'node':
location = self.node_location
elif element_type == 'way':
min_, max_ = (float('inf'), float('inf')), (float('-inf'), float('-inf'))
for lat, lon in [self.node_locations[n] for n in self.nodes]:
min_ = min(min_[0], lat), min(min_[1], lon)
max_ = max(max_[0], lat), max(max_[1], lon)
location = (min_[0] + max_[0]) / 2, (min_[1] + max_[1]) / 2
try:
if self.tags.get('life_cycle', 'in_use') != 'in_use':
return
for key in self.tags.iterkeys():
if 'disused' in key:
# e.g. disused:amenity=restaurant
# http://wiki.openstreetmap.org/wiki/Key:disused
return
if element_type in ['way', 'node'] and any([x in self.tags for x in self.element_tags]):
result = {}
osm_id = 'osm:%s' % self.id
atco_id = self.tags.get('naptan:AtcoCode', None)
result[self.identifier_key] = [osm_id]
# if it has an ATCO ID, we set the ATCO ID as the main ID for this document
# instead of the OSM ID
if atco_id:
result['id'] = atco_id
result[self.identifier_key].append('atco:%s' % atco_id)
else:
result['id'] = osm_id
result['tags'] = []
for it in self.indexed_tags:
doc_tags = [t.replace('_', ' ').strip() for t in self.tags.get(it, '').split(';')]
if doc_tags and doc_tags != ['']:
result['tags'].extend(doc_tags)
# Filter elements depending on amenity / shop tags
if 'amenity' in self.tags:
if self.tags['amenity'] in AMENITIES:
# special case for Park and Rides where amenity=parking and park_ride=bus/yes/... except no
# TODO we should be able to handle this kind of case in a better way
if self.tags['amenity'] == "parking" and self.tags.get('park_ride', 'no') != 'no':
result['type'] = PARK_AND_RIDE
else:
result['type'] = AMENITIES[self.tags['amenity']]
else:
return
elif 'shop' in self.tags:
if self.tags['shop'] in SHOPS:
result['type'] = SHOPS[self.tags['shop']]
else:
return
elif 'emergency' in self.tags:
if self.tags['emergency'] in EMERGENCIES:
result['type'] = EMERGENCIES[self.tags['emergency']]
else:
return
else:
return
# if the element doesn't have a name, it will be an empty string
result['name'] = self.tags.get('name', self.tags.get('operator', ''))
result['name_sort'] = result['name']
address = "{0} {1} {2} {3}".format(self.tags.get("addr:housename", ""), self.tags.get("addr:housenumber", ""),
self.tags.get("addr:street", ""), self.tags.get("addr:postcode", ""))
result['address'] = " ".join(address.split())
if 'phone' in self.tags:
result['phone'] = format_uk_telephone(self.tags['phone'])
if 'url' in self.tags:
result['website'] = self.tags['url']
if 'website' in self.tags:
result['website'] = self.tags['website']
if 'opening_hours' in self.tags:
result['opening_hours'] = self.tags['opening_hours']
if 'collection_times' in self.tags:
result['collection_times'] = self.tags['collection_times']
result['location'] = "%s,%s" % location
search_results = self.indexer.search_for_ids(
self.identifier_key, result[self.identifier_key])
self.pois.append(prepare_document(result, search_results, self.precedence))
except Exception as e:
logger.warning("Couldn't index a POI.", exc_info=True)
def endDocument(self):
self.indexer.index(self.pois)
self.indexer.commit()
def main():
import argparse
from xml.sax import make_parser
parser = argparse.ArgumentParser()
parser.add_argument('osmfile', type=argparse.FileType('r'))
ns = parser.parse_args()
from moxie.core.search.solr import SolrSearch
solr = SolrSearch('collection1')
handler = OSMHandler(solr, 5)
parser = make_parser(['xml.sax.xmlreader.IncrementalParser'])
parser.setContentHandler(handler)
# Parse in 8k chunks
osm = ns.osmfile
buffer = osm.read(8192)
while buffer:
parser.feed(buffer)
buffer = osm.read(8192)
parser.close()
if __name__ == '__main__':
main()
| apache-2.0 | -8,723,120,305,712,997,000 | 40.352113 | 126 | 0.518506 | false | 3.785131 | false | false | false |
NaohiroTamura/ironic | ironic/objects/volume_target.py | 1 | 11250 | # Copyright (c) 2016 Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
from oslo_utils import uuidutils
from oslo_versionedobjects import base as object_base
from ironic.common import exception
from ironic.db import api as db_api
from ironic.objects import base
from ironic.objects import fields as object_fields
@base.IronicObjectRegistry.register
class VolumeTarget(base.IronicObject,
object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
dbapi = db_api.get_instance()
fields = {
'id': object_fields.IntegerField(),
'uuid': object_fields.UUIDField(nullable=True),
'node_id': object_fields.IntegerField(nullable=True),
'volume_type': object_fields.StringField(nullable=True),
'properties': object_fields.FlexibleDictField(nullable=True),
'boot_index': object_fields.IntegerField(nullable=True),
'volume_id': object_fields.StringField(nullable=True),
'extra': object_fields.FlexibleDictField(nullable=True),
}
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def get(cls, context, ident):
"""Find a volume target based on its ID or UUID.
:param context: security context
:param ident: the database primary key ID *or* the UUID of a volume
target
:returns: a :class:`VolumeTarget` object
:raises: InvalidIdentity if ident is neither an integer ID nor a UUID
:raises: VolumeTargetNotFound if no volume target with this ident
exists
"""
if strutils.is_int_like(ident):
return cls.get_by_id(context, ident)
elif uuidutils.is_uuid_like(ident):
return cls.get_by_uuid(context, ident)
else:
raise exception.InvalidIdentity(identity=ident)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def get_by_id(cls, context, db_id):
"""Find a volume target based on its database ID.
:param context: security context
:param db_id: the database primary key (integer) ID of a volume target
:returns: a :class:`VolumeTarget` object
:raises: VolumeTargetNotFound if no volume target with this ID exists
"""
db_target = cls.dbapi.get_volume_target_by_id(db_id)
target = cls._from_db_object(cls(context), db_target)
return target
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def get_by_uuid(cls, context, uuid):
"""Find a volume target based on its UUID.
:param context: security context
:param uuid: the UUID of a volume target
:returns: a :class:`VolumeTarget` object
:raises: VolumeTargetNotFound if no volume target with this UUID exists
"""
db_target = cls.dbapi.get_volume_target_by_uuid(uuid)
target = cls._from_db_object(cls(context), db_target)
return target
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def list(cls, context, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of VolumeTarget objects.
:param context: security context
:param limit: maximum number of resources to return in a single result
:param marker: pagination marker for large data sets
:param sort_key: column to sort results by
:param sort_dir: direction to sort. "asc" or "desc".
:returns: a list of :class:`VolumeTarget` objects
:raises: InvalidParameterValue if sort_key does not exist
"""
db_targets = cls.dbapi.get_volume_target_list(limit=limit,
marker=marker,
sort_key=sort_key,
sort_dir=sort_dir)
return cls._from_db_object_list(context, db_targets)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def list_by_node_id(cls, context, node_id, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of VolumeTarget objects related to a given node ID.
:param context: security context
:param node_id: the integer ID of the node
:param limit: maximum number of resources to return in a single result
:param marker: pagination marker for large data sets
:param sort_key: column to sort results by
:param sort_dir: direction to sort. "asc" or "desc".
:returns: a list of :class:`VolumeTarget` objects
:raises: InvalidParameterValue if sort_key does not exist
"""
db_targets = cls.dbapi.get_volume_targets_by_node_id(
node_id,
limit=limit,
marker=marker,
sort_key=sort_key,
sort_dir=sort_dir)
return cls._from_db_object_list(context, db_targets)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def create(self, context=None):
"""Create a VolumeTarget record in the DB.
:param context: security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: VolumeTarget(context).
:raises: VolumeTargetBootIndexAlreadyExists if a volume target already
exists with the same node ID and boot index
:raises: VolumeTargetAlreadyExists if a volume target with the same
UUID exists
"""
values = self.obj_get_changes()
db_target = self.dbapi.create_volume_target(values)
self._from_db_object(self, db_target)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def destroy(self, context=None):
"""Delete the VolumeTarget from the DB.
:param context: security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: VolumeTarget(context).
:raises: VolumeTargetNotFound if the volume target cannot be found
"""
self.dbapi.destroy_volume_target(self.uuid)
self.obj_reset_changes()
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def save(self, context=None):
"""Save updates to this VolumeTarget.
Updates will be made column by column based on the result
of self.obj_get_changes().
:param context: security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: VolumeTarget(context).
:raises: InvalidParameterValue if the UUID is being changed
:raises: VolumeTargetBootIndexAlreadyExists if a volume target already
exists with the same node ID and boot index values
:raises: VolumeTargetNotFound if the volume target cannot be found
"""
updates = self.obj_get_changes()
updated_target = self.dbapi.update_volume_target(self.uuid, updates)
self._from_db_object(self, updated_target)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def refresh(self, context=None):
"""Loads updates for this VolumeTarget.
Load a volume target with the same UUID from the database
and check for updated attributes. If there are any updates,
they are applied from the loaded volume target, column by column.
:param context: security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: VolumeTarget(context).
:raises: VolumeTargetNotFound if the volume target cannot be found
"""
current = self.get_by_uuid(self._context, uuid=self.uuid)
self.obj_refresh(current)
self.obj_reset_changes()
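
# A hedged lifecycle sketch (not part of the original module); the field values
# and the `context`/`node` objects below are placeholders:
#
#     target = VolumeTarget(context, node_id=node.id, volume_type='iscsi',
#                           boot_index=0, volume_id='<cinder volume uuid>',
#                           properties={}, extra={})
#     target.create()
#     target.boot_index = 1
#     target.save()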
| apache-2.0 | -4,377,000,555,161,214,000 | 46.669492 | 79 | 0.647289 | false | 4.430878 | false | false | false |
hhucn/netsec-uebungssystem | netsecus/student.py | 1 | 5593 | from __future__ import unicode_literals
from . import helper
from . import grading
import collections
Student = collections.namedtuple('Student', ['id'])
NamedStudent = collections.namedtuple('Student', ['student', 'aliases'])
FullStudent = collections.namedtuple('FullStudent', ['student', 'aliases', 'submissions', 'primary_alias'])
def get_full_students(db, where_sql='', filter_params=tuple()):
from . import submission
db.cursor.execute('SELECT id, primary_alias FROM student WHERE deleted IS NOT 1' + where_sql, filter_params)
res = [FullStudent(Student(row[0]), [], [], row[1]) for row in db.cursor.fetchall()]
res_dict = {
fs.student.id: fs for fs in res
}
# Aliases
db.cursor.execute(
'''SELECT student.id, alias.alias FROM student, alias
WHERE student.id = alias.student_id AND student.deleted IS NOT 1''' + where_sql, filter_params)
for student_id, alias in db.cursor.fetchall():
res_dict[student_id].aliases.append(alias)
# Submissions
db.cursor.execute(
'''SELECT
student.id,
submission.id,
submission.sheet_id,
submission.student_id,
submission.time,
submission.files_path,
submission.deleted
FROM student, submission
WHERE student.id = submission.student_id AND student.deleted IS NOT 1''' + where_sql, filter_params)
for row in db.cursor.fetchall():
student_id = row[0]
subm = submission.Submission(*row[1:])
res_dict[student_id].submissions.append(subm)
return res
def get_full_student(db, student_id):
fss = get_full_students(db, ' AND student.id = ?', (student_id,))
if len(fss) != 1:
raise ValueError('Expected exactly one student %r' % student_id)
return fss[0]
def get_studentname_info(db, where_sql='', where_params=[]):
db.cursor.execute('''
SELECT
student.id,
student.primary_alias
FROM student
WHERE (student.deleted IS NOT 1)%s''' % where_sql, where_params)
rows = db.cursor.fetchall()
return [{
'id': row[0],
'primary_alias': row[1],
} for row in rows]
def get_named_student(db, student_id):
db.cursor.execute(
'''SELECT alias.alias FROM alias
WHERE alias.student_id = ?
ORDER BY alias.id''', (student_id,))
rows = db.cursor.fetchall()
return NamedStudent(Student(student_id), [row[0] for row in rows])
def resolve_alias(db, alias):
""" Fetches or creates the student """
email = helper.alias2mail(alias)
db.cursor.execute(
"""SELECT student.id FROM alias, student
WHERE alias.email = ? AND student.id = alias.student_id""",
(email, ))
res = db.cursor.fetchone()
if res:
return Student(res[0])
db.cursor.execute("INSERT INTO student (id, primary_alias, deleted) VALUES (null, ?, 0)", (alias, ))
student = Student(db.cursor.lastrowid)
db.cursor.execute("INSERT INTO alias (student_id, alias, email) VALUES (?, ?, ?)", (student.id, alias, email))
db.database.commit()
return student
def merge(db, main_student_id, merged_student_id):
from . import submission
def _get_student_data(student_id):
db.cursor.execute("""SELECT
submission.id,
submission.sheet_id,
submission.student_id,
submission.time,
submission.files_path,
submission.deleted,
grading_result.id,
grading_result.student_id,
grading_result.sheet_id,
grading_result.submission_id,
grading_result.reviews_json,
grading_result.decipoints,
grading_result.grader,
grading_result.sent_mail_uid,
grading_result.status
FROM
submission LEFT OUTER JOIN grading_result on submission.id = grading_result.submission_id
WHERE submission.student_id = ?""", (student_id,))
res = []
SUBMISSION_FIELDS = 6
for row in db.cursor.fetchall():
sub = submission.Submission(*row[:SUBMISSION_FIELDS])
gr = grading.Grading_Result(*row[SUBMISSION_FIELDS:]) if row[SUBMISSION_FIELDS] else None
res.append((sub, gr))
return res
main_d = _get_student_data(main_student_id)
main_index = {d[0].sheet_id: d for d in main_d}
merged_d = _get_student_data(merged_student_id)
for data in merged_d:
sub, gr = data
if sub.sheet_id in main_index:
continue
new_sub_plan = sub._replace(student_id=main_student_id)
new_sub = submission.create(db, *new_sub_plan[1:])
if gr:
new_gr = gr._replace(student_id=main_student_id, submission_id=new_sub.id)
db.cursor.execute(
'''INSERT INTO grading_result
(student_id, sheet_id, submission_id, reviews_json,
decipoints, grader, sent_mail_uid, status)
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
''', new_gr[1:])
db.cursor.execute(
"""UPDATE submission
SET deleted = 1
WHERE student_id = ?""",
(merged_student_id,))
db.cursor.execute(
"""UPDATE alias
SET student_id = ?
WHERE student_id = ?""",
(main_student_id, merged_student_id))
db.cursor.execute(
"""UPDATE student
SET deleted = 1
WHERE id = ?""",
(merged_student_id,))
db.commit()
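
# Illustrative usage (not part of the original module); the alias strings and the
# `db` handle are placeholders:
#
#     main_student = resolve_alias(db, 'Jane Doe <[email protected]>')
#     duplicate = resolve_alias(db, 'jane.doe')
#     merge(db, main_student.id, duplicate.id)  # folds the duplicate into main_student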
| mit | -2,899,860,098,836,917,000 | 32.291667 | 114 | 0.587878 | false | 3.892136 | false | false | false |
bitmovin/bitmovin-python | bitmovin/resources/models/encodings/encoding.py | 1 | 4786 | from bitmovin.errors import InvalidTypeError
from bitmovin.resources.enums import CloudRegion, EncoderVersion
from bitmovin.utils import Serializable
from bitmovin.resources.models import AbstractModel
from bitmovin.resources import AbstractNameDescriptionResource
from .infrastructure import Infrastructure
class Encoding(AbstractNameDescriptionResource, AbstractModel, Serializable):
def __init__(self, name, description=None, encoder_version=None, cloud_region=None, id_=None, custom_data=None,
infrastructure_id=None, infrastructure=None, labels=None):
super().__init__(id_=id_, custom_data=custom_data, name=name, description=description)
self._encoderVersion = None
self.encoderVersion = encoder_version
self._cloudRegion = None
self.cloudRegion = cloud_region
self.infrastructureId = infrastructure_id
self._infrastructure = None
self.infrastructure = infrastructure
self._labels = None
self.labels = labels
@property
def cloudRegion(self):
if self._cloudRegion is not None:
return self._cloudRegion
else:
return CloudRegion.default().value
@cloudRegion.setter
def cloudRegion(self, new_region):
if new_region is None:
return
if isinstance(new_region, str):
self._cloudRegion = new_region
elif isinstance(new_region, CloudRegion):
self._cloudRegion = new_region.value
else:
raise InvalidTypeError(
'Invalid type {} for cloudRegion: must be either str or CloudRegion!'.format(type(new_region)))
@property
def encoderVersion(self):
if self._encoderVersion is not None:
return self._encoderVersion
else:
return EncoderVersion.default().value
@property
def infrastructure(self):
return self._infrastructure
@infrastructure.setter
def infrastructure(self, new_infrastructure):
if new_infrastructure is None:
self._infrastructure = None
return
if isinstance(new_infrastructure, Infrastructure):
self._infrastructure = new_infrastructure
else:
raise InvalidTypeError(
'Invalid type {} for infrastructure: must be Infrastructure!'.format(
type(new_infrastructure)
)
)
@encoderVersion.setter
def encoderVersion(self, new_version):
if new_version is None:
return
if isinstance(new_version, str):
self._encoderVersion = new_version
elif isinstance(new_version, EncoderVersion):
self._encoderVersion = new_version.value
else:
raise InvalidTypeError(
'Invalid type {} for encoderVersion: must be either str or EncoderVersion!'.format(type(new_version)))
@property
def labels(self):
return self._labels
@labels.setter
def labels(self, new_labels):
if new_labels is None:
self._labels = None
return
if not isinstance(new_labels, list):
raise InvalidTypeError('new_labels has to be a list of strings')
if all(isinstance(label, str) for label in new_labels):
self._labels = new_labels
else:
raise InvalidTypeError('new_labels has to be a list of strings')
@classmethod
def parse_from_json_object(cls, json_object):
id_ = json_object['id']
custom_data = json_object.get('customData')
name = json_object['name']
description = json_object.get('description')
encoder_version = json_object.get('encoderVersion')
cloud_region = json_object.get('cloudRegion')
infrastructure_id = json_object.get('infrastructureId')
labels = json_object.get('labels')
infrastructure_json = json_object.get('infrastructure')
infrastructure = None
if infrastructure_json is not None:
infrastructure = Infrastructure.parse_from_json_object(infrastructure_json)
encoding = Encoding(id_=id_, custom_data=custom_data,
name=name, description=description, encoder_version=encoder_version,
cloud_region=cloud_region, infrastructure_id=infrastructure_id,
infrastructure=infrastructure, labels=labels)
return encoding
def serialize(self):
serialized = super().serialize()
serialized['cloudRegion'] = self.cloudRegion
serialized['encoderVersion'] = self.encoderVersion
serialized['infrastructure'] = self.infrastructure
serialized['labels'] = self.labels
return serialized
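
# A minimal construction sketch (not part of the original module); the values are
# placeholders. cloud_region and encoder_version may be omitted, in which case
# serialize() falls back to CloudRegion.default() / EncoderVersion.default():
#
#     encoding = Encoding(name='example encoding', description='demo encoding',
#                         labels=['demo'])
#     payload = encoding.serialize()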
| unlicense | -330,824,083,189,929,000 | 36.685039 | 118 | 0.639365 | false | 4.615236 | false | false | false |
thobbs/cassandra-dtest | upgrade_tests/cql_tests.py | 1 | 233827 | # coding: utf-8
import itertools
import math
import random
import struct
import time
from collections import OrderedDict
from distutils.version import LooseVersion
from unittest import skipUnless
from uuid import UUID, uuid4
from cassandra import ConsistencyLevel, InvalidRequest
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.protocol import ProtocolException, SyntaxException
from cassandra.query import SimpleStatement
from cassandra.util import sortedset
from nose.exc import SkipTest
from nose.tools import assert_not_in
from assertions import (assert_all, assert_invalid, assert_length_equal,
assert_none, assert_one, assert_row_count)
from dtest import RUN_STATIC_UPGRADE_MATRIX, debug, freshCluster
from thrift_bindings.v22.ttypes import \
ConsistencyLevel as ThriftConsistencyLevel
from thrift_bindings.v22.ttypes import (CfDef, Column, ColumnDef,
ColumnOrSuperColumn, ColumnParent,
Deletion, Mutation, SlicePredicate,
SliceRange)
from thrift_tests import get_thrift_client
from tools import known_failure, require, rows_to_list, since
from upgrade_base import UpgradeTester
from upgrade_manifest import build_upgrade_pairs
class TestCQL(UpgradeTester):
def static_cf_test(self):
""" Test static CF syntax """
cursor = self.prepare()
# Create
cursor.execute("""
CREATE TABLE users (
userid uuid PRIMARY KEY,
firstname text,
lastname text,
age int
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE users")
# Inserts
cursor.execute("INSERT INTO users (userid, firstname, lastname, age) VALUES (550e8400-e29b-41d4-a716-446655440000, 'Frodo', 'Baggins', 32)")
cursor.execute("UPDATE users SET firstname = 'Samwise', lastname = 'Gamgee', age = 33 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479")
# Queries
assert_one(cursor, "SELECT firstname, lastname FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000", ['Frodo', 'Baggins'])
assert_one(cursor, "SELECT * FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000", [UUID('550e8400-e29b-41d4-a716-446655440000'), 32, 'Frodo', 'Baggins'])
assert_all(cursor, "SELECT * FROM users", [[UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 33, 'Samwise', 'Gamgee'],
[UUID('550e8400-e29b-41d4-a716-446655440000'), 32, 'Frodo', 'Baggins']])
# Test batch inserts
cursor.execute("""
BEGIN BATCH
INSERT INTO users (userid, age) VALUES (550e8400-e29b-41d4-a716-446655440000, 36)
UPDATE users SET age = 37 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479
DELETE firstname, lastname FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000
DELETE firstname, lastname FROM users WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479
APPLY BATCH
""")
assert_all(cursor, "SELECT * FROM users", [[UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 37, None, None], [UUID('550e8400-e29b-41d4-a716-446655440000'), 36, None, None]])
@since('2.0', max_version='2.2.X')
def large_collection_errors_test(self):
""" For large collections, make sure that we are printing warnings """
for version in self.get_node_versions():
if version >= '3.0':
raise SkipTest('version {} not compatible with protocol version 2'.format(version))
# We only warn with protocol 2
cursor = self.prepare(protocol_version=2)
cluster = self.cluster
node1 = cluster.nodelist()[0]
self.ignore_log_patterns = ["Detected collection for table"]
cursor.execute("""
CREATE TABLE maps (
userid text PRIMARY KEY,
properties map<int, text>
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE maps")
# Insert more than the max, which is 65535
for i in range(70000):
cursor.execute("UPDATE maps SET properties[{}] = 'x' WHERE userid = 'user'".format(i))
# Query for the data and throw exception
cursor.execute("SELECT properties FROM maps WHERE userid = 'user'")
node1.watch_log_for("Detected collection for table ks.maps with 70000 elements, more than the 65535 limit. "
"Only the first 65535 elements will be returned to the client. "
"Please see http://cassandra.apache.org/doc/cql3/CQL.html#collections for more details.")
def noncomposite_static_cf_test(self):
""" Test non-composite static CF syntax """
cursor = self.prepare()
# Create
cursor.execute("""
CREATE TABLE users (
userid uuid PRIMARY KEY,
firstname ascii,
lastname ascii,
age int
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE users")
# Inserts
cursor.execute("INSERT INTO users (userid, firstname, lastname, age) VALUES (550e8400-e29b-41d4-a716-446655440000, 'Frodo', 'Baggins', 32)")
cursor.execute("UPDATE users SET firstname = 'Samwise', lastname = 'Gamgee', age = 33 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479")
# Queries
assert_one(cursor, "SELECT firstname, lastname FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000", ['Frodo', 'Baggins'])
assert_one(cursor, "SELECT * FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000", [UUID('550e8400-e29b-41d4-a716-446655440000'), 32, 'Frodo', 'Baggins'])
# FIXME There appears to be some sort of problem with reusable cells
# when executing this query. It's likely that CASSANDRA-9705 will
# fix this, but I'm not 100% sure.
assert_one(cursor, "SELECT * FROM users WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479", [UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 33, 'Samwise', 'Gamgee'])
assert_all(cursor, "SELECT * FROM users", [[UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 33, 'Samwise', 'Gamgee'],
[UUID('550e8400-e29b-41d4-a716-446655440000'), 32, 'Frodo', 'Baggins']])
# Test batch inserts
cursor.execute("""
BEGIN BATCH
INSERT INTO users (userid, age) VALUES (550e8400-e29b-41d4-a716-446655440000, 36)
UPDATE users SET age = 37 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479
DELETE firstname, lastname FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000
DELETE firstname, lastname FROM users WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479
APPLY BATCH
""")
assert_all(cursor, "SELECT * FROM users", [[UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 37, None, None],
[UUID('550e8400-e29b-41d4-a716-446655440000'), 36, None, None]])
def dynamic_cf_test(self):
""" Test non-composite dynamic CF syntax """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE clicks (
userid uuid,
url text,
time bigint,
PRIMARY KEY (userid, url)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE clicks")
# Inserts
cursor.execute("INSERT INTO clicks (userid, url, time) VALUES (550e8400-e29b-41d4-a716-446655440000, 'http://foo.bar', 42)")
cursor.execute("INSERT INTO clicks (userid, url, time) VALUES (550e8400-e29b-41d4-a716-446655440000, 'http://foo-2.bar', 24)")
cursor.execute("INSERT INTO clicks (userid, url, time) VALUES (550e8400-e29b-41d4-a716-446655440000, 'http://bar.bar', 128)")
cursor.execute("UPDATE clicks SET time = 24 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 and url = 'http://bar.foo'")
cursor.execute("UPDATE clicks SET time = 12 WHERE userid IN (f47ac10b-58cc-4372-a567-0e02b2c3d479, 550e8400-e29b-41d4-a716-446655440000) and url = 'http://foo-3'")
# Queries
assert_all(cursor, "SELECT url, time FROM clicks WHERE userid = 550e8400-e29b-41d4-a716-446655440000",
[['http://bar.bar', 128], ['http://foo-2.bar', 24], ['http://foo-3', 12], ['http://foo.bar', 42]])
assert_all(cursor, "SELECT * FROM clicks WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479",
[[UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 'http://bar.foo', 24],
[UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 'http://foo-3', 12]])
assert_all(cursor, "SELECT time FROM clicks", [[24], [12], [128], [24], [12], [42]])
# Check we don't allow empty values for url since this is the full underlying cell name (#6152)
assert_invalid(cursor, "INSERT INTO clicks (userid, url, time) VALUES (810e8500-e29b-41d4-a716-446655440000, '', 42)")
def dense_cf_test(self):
""" Test composite 'dense' CF syntax """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE connections (
userid uuid,
ip text,
port int,
time bigint,
PRIMARY KEY (userid, ip, port)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE connections")
# Inserts
cursor.execute("INSERT INTO connections (userid, ip, port, time) VALUES (550e8400-e29b-41d4-a716-446655440000, '192.168.0.1', 80, 42)")
cursor.execute("INSERT INTO connections (userid, ip, port, time) VALUES (550e8400-e29b-41d4-a716-446655440000, '192.168.0.2', 80, 24)")
cursor.execute("INSERT INTO connections (userid, ip, port, time) VALUES (550e8400-e29b-41d4-a716-446655440000, '192.168.0.2', 90, 42)")
cursor.execute("UPDATE connections SET time = 24 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.2' AND port = 80")
# we don't have to include all of the clustering columns (see CASSANDRA-7990)
cursor.execute("INSERT INTO connections (userid, ip, time) VALUES (f47ac10b-58cc-4372-a567-0e02b2c3d479, '192.168.0.3', 42)")
cursor.execute("UPDATE connections SET time = 42 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.4'")
# Queries
assert_all(cursor, "SELECT ip, port, time FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000",
[['192.168.0.1', 80, 42], ['192.168.0.2', 80, 24], ['192.168.0.2', 90, 42]])
assert_all(cursor, "SELECT ip, port, time FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000 and ip >= '192.168.0.2'",
[['192.168.0.2', 80, 24], ['192.168.0.2', 90, 42]])
assert_all(cursor, "SELECT ip, port, time FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000 and ip = '192.168.0.2'",
[['192.168.0.2', 80, 24], ['192.168.0.2', 90, 42]])
assert_none(cursor, "SELECT ip, port, time FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000 and ip > '192.168.0.2'")
assert_one(cursor, "SELECT ip, port, time FROM connections WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.3'",
['192.168.0.3', None, 42])
assert_one(cursor, "SELECT ip, port, time FROM connections WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.4'",
['192.168.0.4', None, 42])
# Deletion
cursor.execute("DELETE time FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000 AND ip = '192.168.0.2' AND port = 80")
res = list(cursor.execute("SELECT * FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000"))
assert_length_equal(res, 2)
cursor.execute("DELETE FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000")
assert_none(cursor, "SELECT * FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000")
cursor.execute("DELETE FROM connections WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.3'")
assert_none(cursor, "SELECT * FROM connections WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.3'")
def sparse_cf_test(self):
""" Test composite 'sparse' CF syntax """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE timeline (
userid uuid,
posted_month int,
posted_day int,
body ascii,
posted_by ascii,
PRIMARY KEY (userid, posted_month, posted_day)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE timeline")
frodo_id = UUID('550e8400-e29b-41d4-a716-446655440000')
sam_id = UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')
# Inserts
cursor.execute("INSERT INTO timeline (userid, posted_month, posted_day, body, posted_by) VALUES (%s, 1, 12, 'Something else', 'Frodo Baggins')", (frodo_id,))
cursor.execute("INSERT INTO timeline (userid, posted_month, posted_day, body, posted_by) VALUES (%s, 1, 24, 'Something something', 'Frodo Baggins')", (frodo_id,))
cursor.execute("UPDATE timeline SET body = 'Yo Froddo', posted_by = 'Samwise Gamgee' WHERE userid = %s AND posted_month = 1 AND posted_day = 3", (sam_id,))
cursor.execute("UPDATE timeline SET body = 'Yet one more message' WHERE userid = %s AND posted_month = 1 and posted_day = 30", (frodo_id,))
# Queries
assert_one(cursor, "SELECT body, posted_by FROM timeline WHERE userid = {} AND posted_month = 1 AND posted_day = 24".format(frodo_id), ['Something something', 'Frodo Baggins'])
assert_all(cursor, "SELECT posted_day, body, posted_by FROM timeline WHERE userid = {} AND posted_month = 1 AND posted_day > 12".format(frodo_id), [
[24, 'Something something', 'Frodo Baggins'],
[30, 'Yet one more message', None]])
assert_all(cursor, "SELECT posted_day, body, posted_by FROM timeline WHERE userid = {} AND posted_month = 1".format(frodo_id), [
[12, 'Something else', 'Frodo Baggins'],
[24, 'Something something', 'Frodo Baggins'],
[30, 'Yet one more message', None]])
@freshCluster()
def limit_ranges_test(self):
""" Validate LIMIT option for 'range queries' in SELECT statements """
cursor = self.prepare(ordered=True)
cursor.execute("""
CREATE TABLE clicks (
userid int,
url text,
time bigint,
PRIMARY KEY (userid, url)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE clicks")
# Inserts
for id in xrange(0, 100):
for tld in ['com', 'org', 'net']:
cursor.execute("INSERT INTO clicks (userid, url, time) VALUES ({}, 'http://foo.{}', 42)".format(id, tld))
# Queries
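            # prepare(ordered=True) is expected to use an order-preserving partitioner, so token
            # order follows key order and the first row at/after token(2) is predictable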
assert_one(cursor, "SELECT * FROM clicks WHERE token(userid) >= token(2) LIMIT 1", [2, 'http://foo.com', 42])
assert_one(cursor, "SELECT * FROM clicks WHERE token(userid) > token(2) LIMIT 1", [3, 'http://foo.com', 42])
def limit_multiget_test(self):
""" Validate LIMIT option for 'multiget' in SELECT statements """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE clicks (
userid int,
url text,
time bigint,
PRIMARY KEY (userid, url)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE clicks")
# Inserts
for id in xrange(0, 100):
for tld in ['com', 'org', 'net']:
cursor.execute("INSERT INTO clicks (userid, url, time) VALUES ({}, 'http://foo.{}', 42)".format(id, tld))
# Check that we do limit the output to 1 *and* that we respect query
# order of keys (even though 48 is after 2)
if self.get_node_version(is_upgraded) >= '2.2':
# the coordinator is the upgraded 2.2+ node
assert_one(cursor, "SELECT * FROM clicks WHERE userid IN (48, 2) LIMIT 1", [2, 'http://foo.com', 42])
else:
# the coordinator is the non-upgraded 2.1 node
assert_one(cursor, "SELECT * FROM clicks WHERE userid IN (48, 2) LIMIT 1", [48, 'http://foo.com', 42])
def simple_tuple_query_test(self):
"""Covers CASSANDRA-8613"""
cursor = self.prepare()
cursor.execute("create table bard (a int, b int, c int, d int , e int, PRIMARY KEY (a, b, c, d, e))")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE bard")
cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 2, 0, 0, 0);""")
cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 1, 0, 0, 0);""")
cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 0, 0, 0, 0);""")
cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 0, 1, 1, 1);""")
cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 0, 2, 2, 2);""")
cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 0, 3, 3, 3);""")
cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 0, 1, 1, 1);""")
assert_all(cursor, "SELECT * FROM bard WHERE b=0 AND (c, d, e) > (1, 1, 1) ALLOW FILTERING;", [[0, 0, 2, 2, 2], [0, 0, 3, 3, 3]])
def limit_sparse_test(self):
""" Validate LIMIT option for sparse table in SELECT statements """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE clicks (
userid int,
url text,
day int,
month text,
year int,
PRIMARY KEY (userid, url)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE clicks")
# Inserts
for id in xrange(0, 100):
for tld in ['com', 'org', 'net']:
cursor.execute("INSERT INTO clicks (userid, url, day, month, year) VALUES ({}, 'http://foo.{}', 1, 'jan', 2012)".format(id, tld))
# Queries
# Check we do get as many rows as requested
res = list(cursor.execute("SELECT * FROM clicks LIMIT 4"))
assert_length_equal(res, 4)
def counters_test(self):
""" Validate counter support """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE clicks (
userid int,
url text,
total counter,
PRIMARY KEY (userid, url)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE clicks")
cursor.execute("UPDATE clicks SET total = total + 1 WHERE userid = 1 AND url = 'http://foo.com'")
assert_one(cursor, "SELECT total FROM clicks WHERE userid = 1 AND url = 'http://foo.com'", [1])
cursor.execute("UPDATE clicks SET total = total - 4 WHERE userid = 1 AND url = 'http://foo.com'")
assert_one(cursor, "SELECT total FROM clicks WHERE userid = 1 AND url = 'http://foo.com'", [-3])
cursor.execute("UPDATE clicks SET total = total+1 WHERE userid = 1 AND url = 'http://foo.com'")
assert_one(cursor, "SELECT total FROM clicks WHERE userid = 1 AND url = 'http://foo.com'", [-2])
cursor.execute("UPDATE clicks SET total = total -2 WHERE userid = 1 AND url = 'http://foo.com'")
assert_one(cursor, "SELECT total FROM clicks WHERE userid = 1 AND url = 'http://foo.com'", [-4])
def indexed_with_eq_test(self):
""" Check that you can query for an indexed column even with a key EQ clause """
cursor = self.prepare()
# Create
cursor.execute("""
CREATE TABLE users (
userid uuid PRIMARY KEY,
firstname text,
lastname text,
age int
);
""")
cursor.execute("CREATE INDEX byAge ON users(age)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE users")
# Inserts
cursor.execute("INSERT INTO users (userid, firstname, lastname, age) VALUES (550e8400-e29b-41d4-a716-446655440000, 'Frodo', 'Baggins', 32)")
cursor.execute("UPDATE users SET firstname = 'Samwise', lastname = 'Gamgee', age = 33 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479")
# Queries
assert_none(cursor, "SELECT firstname FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000 AND age = 33")
assert_one(cursor, "SELECT firstname FROM users WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND age = 33", ['Samwise'])
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11878',
flaky=True)
def select_key_in_test(self):
""" Query for KEY IN (...) """
cursor = self.prepare()
# Create
cursor.execute("""
CREATE TABLE users (
userid uuid PRIMARY KEY,
firstname text,
lastname text,
age int
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE users")
# Inserts
cursor.execute("""
INSERT INTO users (userid, firstname, lastname, age)
VALUES (550e8400-e29b-41d4-a716-446655440000, 'Frodo', 'Baggins', 32)
""")
cursor.execute("""
INSERT INTO users (userid, firstname, lastname, age)
VALUES (f47ac10b-58cc-4372-a567-0e02b2c3d479, 'Samwise', 'Gamgee', 33)
""")
# Select
res = list(cursor.execute("""
SELECT firstname, lastname FROM users
WHERE userid IN (550e8400-e29b-41d4-a716-446655440000, f47ac10b-58cc-4372-a567-0e02b2c3d479)
"""))
assert_length_equal(res, 2)
def exclusive_slice_test(self):
""" Test SELECT respects inclusive and exclusive bounds """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
c int,
v int,
PRIMARY KEY (k, c)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
# Inserts
for x in range(0, 10):
cursor.execute("INSERT INTO test (k, c, v) VALUES (0, %s, %s)", (x, x))
# Queries
assert_all(cursor, "SELECT v FROM test WHERE k = 0", [[x] for x in range(10)])
assert_all(cursor, "SELECT v FROM test WHERE k = 0 AND c >= 2 AND c <= 6", [[x] for x in range(2, 7)])
assert_all(cursor, "SELECT v FROM test WHERE k = 0 AND c > 2 AND c <= 6", [[x] for x in range(3, 7)])
assert_all(cursor, "SELECT v FROM test WHERE k = 0 AND c >= 2 AND c < 6", [[x] for x in range(2, 6)])
assert_all(cursor, "SELECT v FROM test WHERE k = 0 AND c > 2 AND c < 6", [[x] for x in range(3, 6)])
# With LIMIT
assert_all(cursor, "SELECT v FROM test WHERE k = 0 AND c > 2 AND c <= 6 LIMIT 2", [[3], [4]])
assert_all(cursor, "SELECT v FROM test WHERE k = 0 AND c >= 2 AND c < 6 ORDER BY c DESC LIMIT 2", [[5], [4]])
def in_clause_wide_rows_test(self):
""" Check IN support for 'wide rows' in SELECT statement """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test1 (
k int,
c int,
v int,
PRIMARY KEY (k, c)
) WITH COMPACT STORAGE;
""")
# composites
cursor.execute("""
CREATE TABLE test2 (
k int,
c1 int,
c2 int,
v int,
PRIMARY KEY (k, c1, c2)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test1")
cursor.execute("TRUNCATE test2")
# Inserts
for x in range(0, 10):
cursor.execute("INSERT INTO test1 (k, c, v) VALUES (0, %i, %i)" % (x, x))
assert_all(cursor, "SELECT v FROM test1 WHERE k = 0 AND c IN (5, 2, 8)", [[2], [5], [8]])
# Inserts
for x in range(0, 10):
cursor.execute("INSERT INTO test2 (k, c1, c2, v) VALUES (0, 0, {}, {})".format(x, x))
# Check first we don't allow IN everywhere
if self.get_node_version(is_upgraded) >= '2.2':
# the coordinator is the upgraded 2.2+ node
assert_none(cursor, "SELECT v FROM test2 WHERE k = 0 AND c1 IN (5, 2, 8) AND c2 = 3")
else:
# the coordinator is the non-upgraded 2.1 node
assert_invalid(cursor, "SELECT v FROM test2 WHERE k = 0 AND c1 IN (5, 2, 8) AND c2 = 3")
assert_all(cursor, "SELECT v FROM test2 WHERE k = 0 AND c1 = 0 AND c2 IN (5, 2, 8)", [[2], [5], [8]])
def order_by_test(self):
""" Check ORDER BY support in SELECT statement """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test1 (
k int,
c int,
v int,
PRIMARY KEY (k, c)
) WITH COMPACT STORAGE;
""")
# composites
cursor.execute("""
CREATE TABLE test2 (
k int,
c1 int,
c2 int,
v int,
PRIMARY KEY (k, c1, c2)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test1")
cursor.execute("TRUNCATE test2")
# Inserts
for x in range(0, 10):
cursor.execute("INSERT INTO test1 (k, c, v) VALUES (0, {}, {})".format(x, x))
assert_all(cursor, "SELECT v FROM test1 WHERE k = 0 ORDER BY c DESC", [[x] for x in reversed(range(10))])
# Inserts
for x in range(0, 4):
for y in range(0, 2):
cursor.execute("INSERT INTO test2 (k, c1, c2, v) VALUES (0, {}, {}, {})".format(x, y, x * 2 + y))
            # Check first that ORDER BY is rejected on non-clustering columns and on
            # clustering columns queried out of order
assert_invalid(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY c DESC")
assert_invalid(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY c2 DESC")
assert_invalid(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY k DESC")
assert_all(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY c1 DESC", [[x] for x in reversed(range(8))])
assert_all(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY c1", [[x] for x in range(8)])
def more_order_by_test(self):
""" More ORDER BY checks (#4160) """
cursor = self.prepare()
cursor.execute("""
CREATE COLUMNFAMILY Test (
row text,
number int,
string text,
PRIMARY KEY (row, number)
) WITH COMPACT STORAGE
""")
cursor.execute("""
CREATE COLUMNFAMILY test2 (
row text,
number int,
number2 int,
string text,
PRIMARY KEY (row, number, number2)
) WITH COMPACT STORAGE
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO Test (row, number, string) VALUES ('row', 1, 'one');")
cursor.execute("INSERT INTO Test (row, number, string) VALUES ('row', 2, 'two');")
cursor.execute("INSERT INTO Test (row, number, string) VALUES ('row', 3, 'three');")
cursor.execute("INSERT INTO Test (row, number, string) VALUES ('row', 4, 'four');")
assert_all(cursor, "SELECT number FROM Test WHERE row='row' AND number < 3 ORDER BY number ASC;", [[1], [2]])
assert_all(cursor, "SELECT number FROM Test WHERE row='row' AND number >= 3 ORDER BY number ASC;", [[3], [4]])
assert_all(cursor, "SELECT number FROM Test WHERE row='row' AND number < 3 ORDER BY number DESC;", [[2], [1]])
assert_all(cursor, "SELECT number FROM Test WHERE row='row' AND number >= 3 ORDER BY number DESC;", [[4], [3]])
assert_all(cursor, "SELECT number FROM Test WHERE row='row' AND number > 3 ORDER BY number DESC;", [[4]])
assert_all(cursor, "SELECT number FROM Test WHERE row='row' AND number <= 3 ORDER BY number DESC;", [[3], [2], [1]])
# composite clustering
cursor.execute("INSERT INTO test2 (row, number, number2, string) VALUES ('a', 1, 0, 'a');")
cursor.execute("INSERT INTO test2 (row, number, number2, string) VALUES ('a', 2, 0, 'a');")
cursor.execute("INSERT INTO test2 (row, number, number2, string) VALUES ('a', 2, 1, 'a');")
cursor.execute("INSERT INTO test2 (row, number, number2, string) VALUES ('a', 3, 0, 'a');")
cursor.execute("INSERT INTO test2 (row, number, number2, string) VALUES ('a', 3, 1, 'a');")
cursor.execute("INSERT INTO test2 (row, number, number2, string) VALUES ('a', 4, 0, 'a');")
assert_all(cursor, "SELECT number, number2 FROM test2 WHERE row='a' AND number < 3 ORDER BY number ASC;", [[1, 0], [2, 0], [2, 1]])
assert_all(cursor, "SELECT number, number2 FROM test2 WHERE row='a' AND number >= 3 ORDER BY number ASC;", [[3, 0], [3, 1], [4, 0]])
assert_all(cursor, "SELECT number, number2 FROM test2 WHERE row='a' AND number < 3 ORDER BY number DESC;", [[2, 1], [2, 0], [1, 0]])
assert_all(cursor, "SELECT number, number2 FROM test2 WHERE row='a' AND number >= 3 ORDER BY number DESC;", [[4, 0], [3, 1], [3, 0]])
assert_all(cursor, "SELECT number, number2 FROM test2 WHERE row='a' AND number > 3 ORDER BY number DESC;", [[4, 0]])
assert_all(cursor, "SELECT number, number2 FROM test2 WHERE row='a' AND number <= 3 ORDER BY number DESC;", [[3, 1], [3, 0], [2, 1], [2, 0], [1, 0]])
def order_by_validation_test(self):
""" Check we don't allow order by on row key (#4246) """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k1 int,
k2 int,
v int,
PRIMARY KEY (k1, k2)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
q = "INSERT INTO test (k1, k2, v) VALUES (%d, %d, %d)"
cursor.execute(q % (0, 0, 0))
cursor.execute(q % (1, 1, 1))
cursor.execute(q % (2, 2, 2))
assert_invalid(cursor, "SELECT * FROM test ORDER BY k2")
def order_by_with_in_test(self):
""" Check that order-by works with IN (#4327) """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test(
my_id varchar,
col1 int,
value varchar,
PRIMARY KEY (my_id, col1)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.default_fetch_size = None
cursor.execute("INSERT INTO test(my_id, col1, value) VALUES ( 'key1', 1, 'a')")
cursor.execute("INSERT INTO test(my_id, col1, value) VALUES ( 'key2', 3, 'c')")
cursor.execute("INSERT INTO test(my_id, col1, value) VALUES ( 'key3', 2, 'b')")
cursor.execute("INSERT INTO test(my_id, col1, value) VALUES ( 'key4', 4, 'd')")
query = "SELECT col1 FROM test WHERE my_id in('key1', 'key2', 'key3') ORDER BY col1"
assert_all(cursor, query, [[1], [2], [3]])
query = "SELECT col1, my_id FROM test WHERE my_id in('key1', 'key2', 'key3') ORDER BY col1"
assert_all(cursor, query, [[1, 'key1'], [2, 'key3'], [3, 'key2']])
query = "SELECT my_id, col1 FROM test WHERE my_id in('key1', 'key2', 'key3') ORDER BY col1"
assert_all(cursor, query, [['key1', 1], ['key3', 2], ['key2', 3]])
def reversed_comparator_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
c int,
v int,
PRIMARY KEY (k, c)
) WITH CLUSTERING ORDER BY (c DESC);
""")
cursor.execute("""
CREATE TABLE test2 (
k int,
c1 int,
c2 int,
v text,
PRIMARY KEY (k, c1, c2)
) WITH CLUSTERING ORDER BY (c1 ASC, c2 DESC);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("TRUNCATE test2")
# Inserts
for x in range(0, 10):
cursor.execute("INSERT INTO test (k, c, v) VALUES (0, {}, {})".format(x, x))
assert_all(cursor, "SELECT c, v FROM test WHERE k = 0 ORDER BY c ASC", [[x, x] for x in range(0, 10)])
assert_all(cursor, "SELECT c, v FROM test WHERE k = 0 ORDER BY c DESC", [[x, x] for x in range(9, -1, -1)])
# Inserts
for x in range(0, 10):
for y in range(0, 10):
cursor.execute("INSERT INTO test2 (k, c1, c2, v) VALUES (0, {}, {}, '{}{}')".format(x, y, x, y))
assert_invalid(cursor, "SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c1 ASC, c2 ASC")
assert_invalid(cursor, "SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c1 DESC, c2 DESC")
assert_all(cursor, "SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c1 ASC", [[x, y, '{}{}'.format(x, y)] for x in range(0, 10) for y in range(9, -1, -1)])
assert_all(cursor, "SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c1 ASC, c2 DESC", [[x, y, '{}{}'.format(x, y)] for x in range(0, 10) for y in range(9, -1, -1)])
assert_all(cursor, "SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c1 DESC, c2 ASC", [[x, y, '{}{}'.format(x, y)] for x in range(9, -1, -1) for y in range(0, 10)])
assert_invalid(cursor, "SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c2 DESC, c1 ASC")
def null_support_test(self):
""" Test support for nulls """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
c int,
v1 int,
v2 set<text>,
PRIMARY KEY (k, c)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
# Inserts
cursor.execute("INSERT INTO test (k, c, v1, v2) VALUES (0, 0, null, {'1', '2'})")
cursor.execute("INSERT INTO test (k, c, v1) VALUES (0, 1, 1)")
assert_all(cursor, "SELECT * FROM test", [[0, 0, None, set(['1', '2'])], [0, 1, 1, None]])
cursor.execute("INSERT INTO test (k, c, v1) VALUES (0, 1, null)")
cursor.execute("INSERT INTO test (k, c, v2) VALUES (0, 0, null)")
assert_all(cursor, "SELECT * FROM test", [[0, 0, None, None], [0, 1, None, None]])
assert_invalid(cursor, "INSERT INTO test (k, c, v2) VALUES (0, 2, {1, null})")
assert_invalid(cursor, "SELECT * FROM test WHERE k = null")
assert_invalid(cursor, "INSERT INTO test (k, c, v2) VALUES (0, 0, { 'foo', 'bar', null })")
def nameless_index_test(self):
""" Test CREATE INDEX without name and validate the index can be dropped """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE users (
id text PRIMARY KEY,
birth_year int,
)
""")
cursor.execute("CREATE INDEX on users(birth_year)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE users")
cursor.execute("INSERT INTO users (id, birth_year) VALUES ('Tom', 42)")
cursor.execute("INSERT INTO users (id, birth_year) VALUES ('Paul', 24)")
cursor.execute("INSERT INTO users (id, birth_year) VALUES ('Bob', 42)")
assert_all(cursor, "SELECT id FROM users WHERE birth_year = 42", [['Tom'], ['Bob']])
def deletion_test(self):
""" Test simple deletion and in particular check for #4193 bug """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE testcf (
username varchar,
id int,
name varchar,
stuff varchar,
PRIMARY KEY(username, id)
);
""")
# Compact case
cursor.execute("""
CREATE TABLE testcf2 (
username varchar,
id int,
name varchar,
stuff varchar,
PRIMARY KEY(username, id, name)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE testcf")
cursor.execute("TRUNCATE testcf2")
q = "INSERT INTO testcf (username, id, name, stuff) VALUES (%s, %s, %s, %s);"
row1 = ('abc', 2, 'rst', 'some value')
row2 = ('abc', 4, 'xyz', 'some other value')
cursor.execute(q, row1)
cursor.execute(q, row2)
assert_all(cursor, "SELECT * FROM testcf", [list(row1), list(row2)])
cursor.execute("DELETE FROM testcf WHERE username='abc' AND id=2")
assert_all(cursor, "SELECT * FROM testcf", [list(row2)])
q = "INSERT INTO testcf2 (username, id, name, stuff) VALUES (%s, %s, %s, %s);"
row1 = ('abc', 2, 'rst', 'some value')
row2 = ('abc', 4, 'xyz', 'some other value')
cursor.execute(q, row1)
cursor.execute(q, row2)
assert_all(cursor, "SELECT * FROM testcf2", [list(row1), list(row2)])
cursor.execute("DELETE FROM testcf2 WHERE username='abc' AND id=2")
assert_all(cursor, "SELECT * FROM testcf", [list(row2)])
def count_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE events (
kind text,
time int,
value1 int,
value2 int,
PRIMARY KEY(kind, time)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE events")
full = "INSERT INTO events (kind, time, value1, value2) VALUES ('ev1', %d, %d, %d)"
no_v2 = "INSERT INTO events (kind, time, value1) VALUES ('ev1', %d, %d)"
cursor.execute(full % (0, 0, 0))
cursor.execute(full % (1, 1, 1))
cursor.execute(no_v2 % (2, 2))
cursor.execute(full % (3, 3, 3))
cursor.execute(no_v2 % (4, 4))
cursor.execute("INSERT INTO events (kind, time, value1, value2) VALUES ('ev2', 0, 0, 0)")
assert_all(cursor, "SELECT COUNT(*) FROM events WHERE kind = 'ev1'", [[5]])
assert_all(cursor, "SELECT COUNT(1) FROM events WHERE kind IN ('ev1', 'ev2') AND time=0", [[2]])
def batch_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE users (
userid text PRIMARY KEY,
name text,
password text
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE users")
query = SimpleStatement("""
BEGIN BATCH
INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3';
INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c');
DELETE name FROM users WHERE userid = 'user1';
APPLY BATCH;
""", consistency_level=ConsistencyLevel.QUORUM)
cursor.execute(query)
def token_range_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
c int,
v int
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
c = 100
for i in range(0, c):
cursor.execute("INSERT INTO test (k, c, v) VALUES ({}, {}, {})".format(i, i, i))
rows = cursor.execute("SELECT k FROM test")
inOrder = [x[0] for x in rows]
assert_length_equal(inOrder, c)
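            # -2**63 is the smallest possible Murmur3 token, so every key's token is >= min_token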
min_token = -2 ** 63
res = list(cursor.execute("SELECT k FROM test WHERE token(k) >= {}".format(min_token)))
assert_length_equal(res, c)
# assert_invalid(cursor, "SELECT k FROM test WHERE token(k) >= 0")
# cursor.execute("SELECT k FROM test WHERE token(k) >= 0")
assert_all(cursor, "SELECT k FROM test WHERE token(k) >= token({}) AND token(k) < token({})".format(inOrder[32], inOrder[65]), [[inOrder[x]] for x in range(32, 65)])
def timestamp_and_ttl_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
c text,
d text
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, c) VALUES (1, 'test')")
cursor.execute("INSERT INTO test (k, c) VALUES (2, 'test') USING TTL 400")
res = list(cursor.execute("SELECT k, c, writetime(c), ttl(c) FROM test"))
assert_length_equal(res, 2)
for r in res:
self.assertIsInstance(r[2], (int, long))
if r[0] == 1:
self.assertIsNone(r[3], res)
else:
self.assertIsInstance(r[3], (int, long))
# wrap writetime(), ttl() in other functions (test for CASSANDRA-8451)
res = list(cursor.execute("SELECT k, c, blobAsBigint(bigintAsBlob(writetime(c))), ttl(c) FROM test"))
assert_length_equal(res, 2)
for r in res:
self.assertIsInstance(r[2], (int, long))
if r[0] == 1:
self.assertIsNone(r[3], res)
else:
self.assertIsInstance(r[3], (int, long))
res = list(cursor.execute("SELECT k, c, writetime(c), blobAsInt(intAsBlob(ttl(c))) FROM test"))
assert_length_equal(res, 2)
for r in res:
self.assertIsInstance(r[2], (int, long))
if r[0] == 1:
self.assertIsNone(r[3], res)
else:
self.assertIsInstance(r[3], (int, long))
assert_invalid(cursor, "SELECT k, c, writetime(k) FROM test")
res = cursor.execute("SELECT k, d, writetime(d) FROM test WHERE k = 1")
assert_one(cursor, "SELECT k, d, writetime(d) FROM test WHERE k = 1", [1, None, None])
def no_range_ghost_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v int
)
""")
# Example from #3505
cursor.execute("CREATE KEYSPACE ks1 with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };")
cursor.execute("""
CREATE COLUMNFAMILY ks1.users (
KEY varchar PRIMARY KEY,
password varchar,
gender varchar,
birth_year bigint)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("TRUNCATE ks1.users")
for k in range(0, 5):
cursor.execute("INSERT INTO test (k, v) VALUES (%d, 0)" % k)
assert_all(cursor, "SELECT k FROM test", [[k] for k in range(0, 5)], ignore_order=True)
cursor.execute("DELETE FROM test WHERE k=2")
assert_all(cursor, "SELECT k FROM test", [[k] for k in range(0, 5) if k is not 2], ignore_order=True)
# Example from #3505
cursor.execute("USE ks1")
cursor.execute("INSERT INTO users (KEY, password) VALUES ('user1', 'ch@ngem3a')")
cursor.execute("UPDATE users SET gender = 'm', birth_year = 1980 WHERE KEY = 'user1'")
assert_all(cursor, "SELECT * FROM users WHERE KEY='user1'", [['user1', 1980, 'm', 'ch@ngem3a']])
cursor.execute("TRUNCATE users")
assert_all(cursor, "SELECT * FROM users", [])
assert_all(cursor, "SELECT * FROM users WHERE KEY='user1'", [])
@freshCluster()
def undefined_column_handling_test(self):
cursor = self.prepare(ordered=True)
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v1 int,
v2 int,
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v1, v2) VALUES (0, 0, 0)")
cursor.execute("INSERT INTO test (k, v1) VALUES (1, 1)")
cursor.execute("INSERT INTO test (k, v1, v2) VALUES (2, 2, 2)")
assert_all(cursor, "SELECT v2 FROM test", [[0], [None], [2]])
assert_all(cursor, "SELECT v2 FROM test WHERE k = 1", [[None]])
@freshCluster()
def range_tombstones_test(self):
""" Test deletion by 'composite prefix' (range tombstones) """
        # Uses 3 nodes just to make sure RowMutations are correctly serialized
cursor = self.prepare(nodes=3)
cursor.execute("""
CREATE TABLE test1 (
k int,
c1 int,
c2 int,
v1 int,
v2 int,
PRIMARY KEY (k, c1, c2)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test1")
rows = 5
col1 = 2
col2 = 2
cpr = col1 * col2
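            # cpr = clustering rows per partition; values are numbered sequentially across partitions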
for i in xrange(0, rows):
for j in xrange(0, col1):
for k in xrange(0, col2):
n = (i * cpr) + (j * col2) + k
cursor.execute("INSERT INTO test1 (k, c1, c2, v1, v2) VALUES ({}, {}, {}, {}, {})".format(i, j, k, n, n))
for i in xrange(0, rows):
assert_all(cursor, "SELECT v1, v2 FROM test1 where k = %d" % i, [[x, x] for x in xrange(i * cpr, (i + 1) * cpr)])
for i in xrange(0, rows):
cursor.execute("DELETE FROM test1 WHERE k = %d AND c1 = 0" % i)
for i in xrange(0, rows):
assert_all(cursor, "SELECT v1, v2 FROM test1 WHERE k = %d" % i, [[x, x] for x in xrange(i * cpr + col1, (i + 1) * cpr)])
self.cluster.flush()
time.sleep(0.2)
for i in xrange(0, rows):
assert_all(cursor, "SELECT v1, v2 FROM test1 WHERE k = %d" % i, [[x, x] for x in xrange(i * cpr + col1, (i + 1) * cpr)])
def range_tombstones_compaction_test(self):
""" Test deletion by 'composite prefix' (range tombstones) with compaction """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test1 (
k int,
c1 int,
c2 int,
v1 text,
PRIMARY KEY (k, c1, c2)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test1")
for c1 in range(0, 4):
for c2 in range(0, 2):
cursor.execute("INSERT INTO test1 (k, c1, c2, v1) VALUES (0, %d, %d, '%s')" % (c1, c2, '%i%i' % (c1, c2)))
self.cluster.flush()
cursor.execute("DELETE FROM test1 WHERE k = 0 AND c1 = 1")
self.cluster.flush()
self.cluster.compact()
assert_all(cursor, "SELECT v1 FROM test1 WHERE k = 0", [['{}{}'.format(c1, c2)] for c1 in xrange(0, 4) for c2 in xrange(0, 2) if c1 != 1])
def delete_row_test(self):
""" Test deletion of rows """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
c1 int,
c2 int,
v1 int,
v2 int,
PRIMARY KEY (k, c1, c2)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
q = "INSERT INTO test (k, c1, c2, v1, v2) VALUES (%d, %d, %d, %d, %d)"
cursor.execute(q % (0, 0, 0, 0, 0))
cursor.execute(q % (0, 0, 1, 1, 1))
cursor.execute(q % (0, 0, 2, 2, 2))
cursor.execute(q % (0, 1, 0, 3, 3))
cursor.execute("DELETE FROM test WHERE k = 0 AND c1 = 0 AND c2 = 0")
res = list(cursor.execute("SELECT * FROM test"))
assert_length_equal(res, 3)
def range_query_2ndary_test(self):
""" Test range queries with 2ndary indexes (#4257) """
cursor = self.prepare()
cursor.execute("CREATE TABLE indextest (id int primary key, row int, setid int);")
cursor.execute("CREATE INDEX indextest_setid_idx ON indextest (setid)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE indextest")
q = "INSERT INTO indextest (id, row, setid) VALUES (%d, %d, %d);"
cursor.execute(q % (0, 0, 0))
cursor.execute(q % (1, 1, 0))
cursor.execute(q % (2, 2, 0))
cursor.execute(q % (3, 3, 0))
assert_invalid(cursor, "SELECT * FROM indextest WHERE setid = 0 AND row < 1;")
assert_all(cursor, "SELECT * FROM indextest WHERE setid = 0 AND row < 1 ALLOW FILTERING;", [[0, 0, 0]])
def set_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE user (
fn text,
ln text,
tags set<text>,
PRIMARY KEY (fn, ln)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE user")
q = "UPDATE user SET %s WHERE fn='Tom' AND ln='Bombadil'"
cursor.execute(q % "tags = tags + { 'foo' }")
cursor.execute(q % "tags = tags + { 'bar' }")
cursor.execute(q % "tags = tags + { 'foo' }")
cursor.execute(q % "tags = tags + { 'foobar' }")
cursor.execute(q % "tags = tags - { 'bar' }")
assert_all(cursor, "SELECT tags FROM user", [[set(['foo', 'foobar'])]])
q = "UPDATE user SET {} WHERE fn='Bilbo' AND ln='Baggins'"
cursor.execute(q.format("tags = { 'a', 'c', 'b' }"))
assert_all(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [[set(['a', 'b', 'c'])]])
time.sleep(.01)
cursor.execute(q.format("tags = { 'm', 'n' }"))
assert_all(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [[set(['m', 'n'])]])
cursor.execute("DELETE tags['m'] FROM user WHERE fn='Bilbo' AND ln='Baggins'")
assert_all(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [[set(['n'])]])
cursor.execute("DELETE tags FROM user WHERE fn='Bilbo' AND ln='Baggins'")
assert_all(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [])
def map_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE user (
fn text,
ln text,
m map<text, int>,
PRIMARY KEY (fn, ln)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE user")
q = "UPDATE user SET %s WHERE fn='Tom' AND ln='Bombadil'"
cursor.execute(q % "m['foo'] = 3")
cursor.execute(q % "m['bar'] = 4")
cursor.execute(q % "m['woot'] = 5")
cursor.execute(q % "m['bar'] = 6")
cursor.execute("DELETE m['foo'] FROM user WHERE fn='Tom' AND ln='Bombadil'")
assert_all(cursor, "SELECT m FROM user", [[{'woot': 5, 'bar': 6}]])
q = "UPDATE user SET %s WHERE fn='Bilbo' AND ln='Baggins'"
cursor.execute(q % "m = { 'a' : 4 , 'c' : 3, 'b' : 2 }")
assert_all(cursor, "SELECT m FROM user WHERE fn='Bilbo' AND ln='Baggins'", [[{'a': 4, 'b': 2, 'c': 3}]])
time.sleep(.01)
# Check we correctly overwrite
cursor.execute(q % "m = { 'm' : 4 , 'n' : 1, 'o' : 2 }")
assert_all(cursor, "SELECT m FROM user WHERE fn='Bilbo' AND ln='Baggins'", [[{'m': 4, 'n': 1, 'o': 2}]])
cursor.execute(q % "m = {}")
assert_all(cursor, "SELECT m FROM user WHERE fn='Bilbo' AND ln='Baggins'", [])
def list_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE user (
fn text,
ln text,
tags list<text>,
PRIMARY KEY (fn, ln)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE user")
q = "UPDATE user SET %s WHERE fn='Tom' AND ln='Bombadil'"
cursor.execute(q % "tags = tags + [ 'foo' ]")
cursor.execute(q % "tags = tags + [ 'bar' ]")
cursor.execute(q % "tags = tags + [ 'foo' ]")
cursor.execute(q % "tags = tags + [ 'foobar' ]")
assert_one(cursor, "SELECT tags FROM user", [['foo', 'bar', 'foo', 'foobar']])
q = "UPDATE user SET %s WHERE fn='Bilbo' AND ln='Baggins'"
cursor.execute(q % "tags = [ 'a', 'c', 'b', 'c' ]")
assert_one(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [['a', 'c', 'b', 'c']])
cursor.execute(q % "tags = [ 'm', 'n' ] + tags")
assert_one(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [['m', 'n', 'a', 'c', 'b', 'c']])
cursor.execute(q % "tags[2] = 'foo', tags[4] = 'bar'")
assert_one(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [['m', 'n', 'foo', 'c', 'bar', 'c']])
cursor.execute("DELETE tags[2] FROM user WHERE fn='Bilbo' AND ln='Baggins'")
assert_one(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [['m', 'n', 'c', 'bar', 'c']])
cursor.execute(q % "tags = tags - [ 'bar' ]")
assert_one(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [['m', 'n', 'c', 'c']])
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12399',
flaky=False)
def multi_collection_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE foo(
k uuid PRIMARY KEY,
L list<int>,
M map<text, int>,
S set<int>
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE foo")
cursor.execute("UPDATE ks.foo SET L = [1, 3, 5] WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
cursor.execute("UPDATE ks.foo SET L = L + [7, 11, 13] WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
cursor.execute("UPDATE ks.foo SET S = {1, 3, 5} WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
cursor.execute("UPDATE ks.foo SET S = S + {7, 11, 13} WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
cursor.execute("UPDATE ks.foo SET M = {'foo': 1, 'bar' : 3} WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
cursor.execute("UPDATE ks.foo SET M = M + {'foobar' : 4} WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
assert_all(cursor, "SELECT L, M, S FROM foo WHERE k = b017f48f-ae67-11e1-9096-005056c00008", [[
[1, 3, 5, 7, 11, 13],
OrderedDict([('bar', 3), ('foo', 1), ('foobar', 4)]),
sortedset([1, 3, 5, 7, 11, 13])
]])
def range_query_test(self):
""" Range test query from #4372 """
cursor = self.prepare()
cursor.execute("CREATE TABLE test (a int, b int, c int, d int, e int, f text, PRIMARY KEY (a, b, c, d, e) )")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 1, 2, '2');")
cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 1, 1, '1');")
cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 2, 1, '1');")
cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 1, 3, '3');")
cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 1, 5, '5');")
assert_all(cursor, "SELECT a, b, c, d, e, f FROM test WHERE a = 1 AND b = 1 AND c = 1 AND d = 1 AND e >= 2;", [[1, 1, 1, 1, 2, u'2'], [1, 1, 1, 1, 3, u'3'], [1, 1, 1, 1, 5, u'5']])
def composite_row_key_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k1 int,
k2 int,
c int,
v int,
PRIMARY KEY ((k1, k2), c)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
req = "INSERT INTO test (k1, k2, c, v) VALUES ({}, {}, {}, {})"
for i in range(0, 4):
cursor.execute(req.format(0, i, i, i))
assert_all(cursor, "SELECT * FROM test", [[0, 2, 2, 2], [0, 3, 3, 3], [0, 0, 0, 0], [0, 1, 1, 1]])
assert_all(cursor, "SELECT * FROM test WHERE k1 = 0 and k2 IN (1, 3)", [[0, 1, 1, 1], [0, 3, 3, 3]])
assert_invalid(cursor, "SELECT * FROM test WHERE k2 = 3")
if self.get_node_version(is_upgraded) < '2.2':
                # the coordinator is the non-upgraded 2.1 node
assert_invalid(cursor, "SELECT * FROM test WHERE k1 IN (0, 1) and k2 = 3")
assert_all(cursor, "SELECT * FROM test WHERE token(k1, k2) = token(0, 1)", [[0, 1, 1, 1]])
assert_all(cursor, "SELECT * FROM test WHERE token(k1, k2) > " + str(-((2 ** 63) - 1)), [[0, 2, 2, 2], [0, 3, 3, 3], [0, 0, 0, 0], [0, 1, 1, 1]])
def cql3_insert_thrift_test(self):
""" Check that we can insert from thrift into a CQL3 table (#4377) """
cursor = self.prepare(start_rpc=True)
cursor.execute("""
CREATE TABLE test (
k int,
c int,
v int,
PRIMARY KEY (k, c)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
node = self.cluster.nodelist()[0]
host, port = node.network_interfaces['thrift']
client = get_thrift_client(host, port)
client.transport.open()
client.set_keyspace('ks')
key = struct.pack('>i', 2)
column_name_component = struct.pack('>i', 4)
# component length + component + EOC + component length + component + EOC
column_name = '\x00\x04' + column_name_component + '\x00' + '\x00\x01' + 'v' + '\x00'
value = struct.pack('>i', 8)
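            # the mutation below is the thrift equivalent of: INSERT INTO test (k, c, v) VALUES (2, 4, 8)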
client.batch_mutate(
{key: {'test': [Mutation(ColumnOrSuperColumn(column=Column(name=column_name, value=value, timestamp=100)))]}},
ThriftConsistencyLevel.ONE)
assert_one(cursor, "SELECT * FROM test", [2, 4, 8])
def cql3_non_compound_range_tombstones_test(self):
"""
Checks that 3.0 serializes RangeTombstoneLists correctly
when communicating with 2.2 nodes.
@jira_ticket CASSANDRA-11930
"""
session = self.prepare(start_rpc=True)
node = self.cluster.nodelist()[0]
host, port = node.network_interfaces['thrift']
client = get_thrift_client(host, port)
client.transport.open()
client.set_keyspace('ks')
# create a CF with mixed static and dynamic cols
column_defs = [ColumnDef('static1', 'Int32Type', None, None, None)]
cfdef = CfDef(
keyspace='ks',
name='cf',
column_type='Standard',
comparator_type='AsciiType',
key_validation_class='AsciiType',
default_validation_class='AsciiType',
column_metadata=column_defs)
client.system_add_column_family(cfdef)
session.cluster.control_connection.wait_for_schema_agreement()
for is_upgraded, session, node in self.do_upgrade(session, return_nodes=True):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
upgrade_to_version = LooseVersion(self.get_node_version(is_upgraded=True))
if LooseVersion('3.0.0') <= upgrade_to_version <= LooseVersion('3.0.6'):
self.skip('CASSANDRA-11930 was fixed in 3.0.7 and 3.7')
elif LooseVersion('3.1') <= upgrade_to_version <= LooseVersion('3.6'):
self.skip('CASSANDRA-11930 was fixed in 3.0.7 and 3.7')
session.execute("TRUNCATE ks.cf")
host, port = node.network_interfaces['thrift']
client = get_thrift_client(host, port)
client.transport.open()
client.set_keyspace('ks')
# insert a number of keys so that we'll get rows on both the old and upgraded nodes
for key in ['key{}'.format(i) for i in range(10)]:
debug("Using key " + key)
# insert "static" column
client.batch_mutate(
{key: {'cf': [Mutation(ColumnOrSuperColumn(column=Column(name='static1', value=struct.pack('>i', 1), timestamp=100)))]}},
ThriftConsistencyLevel.ALL)
# insert "dynamic" columns
for i, column_name in enumerate(('a', 'b', 'c', 'd', 'e')):
column_value = 'val{}'.format(i)
client.batch_mutate(
{key: {'cf': [Mutation(ColumnOrSuperColumn(column=Column(name=column_name, value=column_value, timestamp=100)))]}},
ThriftConsistencyLevel.ALL)
# sanity check on the query
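                # SliceRange('', '', False, 100): unbounded column range, forward order, at most 100 cells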
fetch_slice = SlicePredicate(slice_range=SliceRange('', '', False, 100))
row = client.get_slice(key, ColumnParent(column_family='cf'), fetch_slice, ThriftConsistencyLevel.ALL)
self.assertEqual(6, len(row), row)
cols = OrderedDict([(cosc.column.name, cosc.column.value) for cosc in row])
debug(cols)
self.assertEqual(['a', 'b', 'c', 'd', 'e', 'static1'], cols.keys())
self.assertEqual('val0', cols['a'])
self.assertEqual('val4', cols['e'])
self.assertEqual(struct.pack('>i', 1), cols['static1'])
# delete a slice of dynamic columns
slice_range = SliceRange('b', 'd', False, 100)
client.batch_mutate(
{key: {'cf': [Mutation(deletion=Deletion(timestamp=101, predicate=SlicePredicate(slice_range=slice_range)))]}},
ThriftConsistencyLevel.ALL)
# check remaining columns
row = client.get_slice(key, ColumnParent(column_family='cf'), fetch_slice, ThriftConsistencyLevel.ALL)
self.assertEqual(3, len(row), row)
cols = OrderedDict([(cosc.column.name, cosc.column.value) for cosc in row])
debug(cols)
self.assertEqual(['a', 'e', 'static1'], cols.keys())
self.assertEqual('val0', cols['a'])
self.assertEqual('val4', cols['e'])
self.assertEqual(struct.pack('>i', 1), cols['static1'])
def row_existence_test(self):
""" Check the semantic of CQL row existence (part of #4361) """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
c int,
v1 int,
v2 int,
PRIMARY KEY (k, c)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, c, v1, v2) VALUES (1, 1, 1, 1)")
assert_one(cursor, "SELECT * FROM test", [1, 1, 1, 1])
assert_invalid(cursor, "DELETE c FROM test WHERE k = 1 AND c = 1")
cursor.execute("DELETE v2 FROM test WHERE k = 1 AND c = 1")
assert_one(cursor, "SELECT * FROM test", [1, 1, 1, None])
cursor.execute("DELETE v1 FROM test WHERE k = 1 AND c = 1")
assert_one(cursor, "SELECT * FROM test", [1, 1, None, None])
cursor.execute("DELETE FROM test WHERE k = 1 AND c = 1")
assert_none(cursor, "SELECT * FROM test", )
cursor.execute("INSERT INTO test (k, c) VALUES (2, 2)")
assert_one(cursor, "SELECT * FROM test", [2, 2, None, None])
@freshCluster()
def only_pk_test(self):
""" Check table with only a PK (#4361) """
cursor = self.prepare(ordered=True)
cursor.execute("""
CREATE TABLE test (
k int,
c int,
PRIMARY KEY (k, c)
)
""")
# Check for dense tables too
cursor.execute("""
CREATE TABLE test2 (
k int,
c int,
PRIMARY KEY (k, c)
) WITH COMPACT STORAGE
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("TRUNCATE test2")
q = "INSERT INTO test (k, c) VALUES (%s, %s)"
for k in range(0, 2):
for c in range(0, 2):
cursor.execute(q, (k, c))
query = "SELECT * FROM test"
assert_all(cursor, query, [[x, y] for x in range(0, 2) for y in range(0, 2)])
q = "INSERT INTO test2 (k, c) VALUES (%s, %s)"
for k in range(0, 2):
for c in range(0, 2):
cursor.execute(q, (k, c))
query = "SELECT * FROM test2"
assert_all(cursor, query, [[x, y] for x in range(0, 2) for y in range(0, 2)])
def no_clustering_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v int)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
for i in range(10):
cursor.execute("INSERT INTO test (k, v) VALUES (%s, %s)", (i, i))
cursor.default_fetch_size = None
assert_all(cursor, "SELECT * FROM test", [[i, i] for i in range(10)], ignore_order=True)
def date_test(self):
""" Check dates are correctly recognized and validated """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
t timestamp
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, t) VALUES (0, '2011-02-03')")
assert_invalid(cursor, "INSERT INTO test (k, t) VALUES (0, '2011-42-42')")
@freshCluster()
def range_slice_test(self):
""" Test a regression from #1337 """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k text PRIMARY KEY,
v int
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES ('foo', 0)")
cursor.execute("INSERT INTO test (k, v) VALUES ('bar', 1)")
assert_row_count(cursor, 'test', 2)
@freshCluster()
def composite_index_with_pk_test(self):
cursor = self.prepare(ordered=True)
cursor.execute("""
CREATE TABLE blogs (
blog_id int,
time1 int,
time2 int,
author text,
content text,
PRIMARY KEY (blog_id, time1, time2)
)
""")
cursor.execute("CREATE INDEX ON blogs(author)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE blogs")
req = "INSERT INTO blogs (blog_id, time1, time2, author, content) VALUES (%d, %d, %d, '%s', '%s')"
cursor.execute(req % (1, 0, 0, 'foo', 'bar1'))
cursor.execute(req % (1, 0, 1, 'foo', 'bar2'))
cursor.execute(req % (2, 1, 0, 'foo', 'baz'))
cursor.execute(req % (3, 0, 1, 'gux', 'qux'))
query = "SELECT blog_id, content FROM blogs WHERE author='foo'"
assert_all(cursor, query, [[1, 'bar1'], [1, 'bar2'], [2, 'baz']])
query = "SELECT blog_id, content FROM blogs WHERE time1 > 0 AND author='foo' ALLOW FILTERING"
assert_one(cursor, query, [2, 'baz'])
query = "SELECT blog_id, content FROM blogs WHERE time1 = 1 AND author='foo' ALLOW FILTERING"
assert_one(cursor, query, [2, 'baz'])
query = "SELECT blog_id, content FROM blogs WHERE time1 = 1 AND time2 = 0 AND author='foo' ALLOW FILTERING"
assert_one(cursor, query, [2, 'baz'])
query = "SELECT content FROM blogs WHERE time1 = 1 AND time2 = 1 AND author='foo' ALLOW FILTERING"
assert_none(cursor, query)
query = "SELECT content FROM blogs WHERE time1 = 1 AND time2 > 0 AND author='foo' ALLOW FILTERING"
assert_none(cursor, query)
assert_invalid(cursor, "SELECT content FROM blogs WHERE time2 >= 0 AND author='foo'")
# as discussed in CASSANDRA-8148, some queries that should have required ALLOW FILTERING
# in 2.0 have been fixed for 2.2
if self.get_node_version(is_upgraded) < '2.2':
# the coordinator is the non-upgraded 2.1 node
cursor.execute("SELECT blog_id, content FROM blogs WHERE time1 > 0 AND author='foo'")
cursor.execute("SELECT blog_id, content FROM blogs WHERE time1 = 1 AND author='foo'")
cursor.execute("SELECT blog_id, content FROM blogs WHERE time1 = 1 AND time2 = 0 AND author='foo'")
cursor.execute("SELECT content FROM blogs WHERE time1 = 1 AND time2 = 1 AND author='foo'")
cursor.execute("SELECT content FROM blogs WHERE time1 = 1 AND time2 > 0 AND author='foo'")
else:
# the coordinator is the upgraded 2.2+ node
assert_invalid(cursor, "SELECT blog_id, content FROM blogs WHERE time1 > 0 AND author='foo'")
assert_invalid(cursor, "SELECT blog_id, content FROM blogs WHERE time1 = 1 AND author='foo'")
assert_invalid(cursor, "SELECT blog_id, content FROM blogs WHERE time1 = 1 AND time2 = 0 AND author='foo'")
assert_invalid(cursor, "SELECT content FROM blogs WHERE time1 = 1 AND time2 = 1 AND author='foo'")
assert_invalid(cursor, "SELECT content FROM blogs WHERE time1 = 1 AND time2 > 0 AND author='foo'")
@freshCluster()
def limit_bugs_test(self):
""" Test for LIMIT bugs from 4579 """
cursor = self.prepare(ordered=True)
cursor.execute("""
CREATE TABLE testcf (
a int,
b int,
c int,
d int,
e int,
PRIMARY KEY (a, b)
);
""")
cursor.execute("""
CREATE TABLE testcf2 (
a int primary key,
b int,
c int,
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE testcf")
cursor.execute("TRUNCATE testcf2")
cursor.execute("INSERT INTO testcf (a, b, c, d, e) VALUES (1, 1, 1, 1, 1);")
cursor.execute("INSERT INTO testcf (a, b, c, d, e) VALUES (2, 2, 2, 2, 2);")
cursor.execute("INSERT INTO testcf (a, b, c, d, e) VALUES (3, 3, 3, 3, 3);")
cursor.execute("INSERT INTO testcf (a, b, c, d, e) VALUES (4, 4, 4, 4, 4);")
assert_all(cursor, "SELECT * FROM testcf", [[1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3], [4, 4, 4, 4, 4]])
assert_all(cursor, "SELECT * FROM testcf LIMIT 1;", [[1, 1, 1, 1, 1]])
assert_all(cursor, "SELECT * FROM testcf LIMIT 2;", [[1, 1, 1, 1, 1], [2, 2, 2, 2, 2]])
cursor.execute("INSERT INTO testcf2 (a, b, c) VALUES (1, 1, 1);")
cursor.execute("INSERT INTO testcf2 (a, b, c) VALUES (2, 2, 2);")
cursor.execute("INSERT INTO testcf2 (a, b, c) VALUES (3, 3, 3);")
cursor.execute("INSERT INTO testcf2 (a, b, c) VALUES (4, 4, 4);")
assert_all(cursor, "SELECT * FROM testcf2;", [[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])
assert_all(cursor, "SELECT * FROM testcf2 LIMIT 1;", [[1, 1, 1]])
assert_all(cursor, "SELECT * FROM testcf2 LIMIT 2;", [[1, 1, 1], [2, 2, 2]])
assert_all(cursor, "SELECT * FROM testcf2 LIMIT 3;", [[1, 1, 1], [2, 2, 2], [3, 3, 3]])
assert_all(cursor, "SELECT * FROM testcf2 LIMIT 4;", [[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])
assert_all(cursor, "SELECT * FROM testcf2 LIMIT 5;", [[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])
def bug_4532_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE compositetest(
status ascii,
ctime bigint,
key ascii,
nil ascii,
PRIMARY KEY (status, ctime, key)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE compositetest")
cursor.execute("INSERT INTO compositetest(status,ctime,key,nil) VALUES ('C',12345678,'key1','')")
cursor.execute("INSERT INTO compositetest(status,ctime,key,nil) VALUES ('C',12345678,'key2','')")
cursor.execute("INSERT INTO compositetest(status,ctime,key,nil) VALUES ('C',12345679,'key3','')")
cursor.execute("INSERT INTO compositetest(status,ctime,key,nil) VALUES ('C',12345679,'key4','')")
cursor.execute("INSERT INTO compositetest(status,ctime,key,nil) VALUES ('C',12345679,'key5','')")
cursor.execute("INSERT INTO compositetest(status,ctime,key,nil) VALUES ('C',12345680,'key6','')")
assert_invalid(cursor, "SELECT * FROM compositetest WHERE ctime>=12345679 AND key='key3' AND ctime<=12345680 LIMIT 3;")
assert_invalid(cursor, "SELECT * FROM compositetest WHERE ctime=12345679 AND key='key3' AND ctime<=12345680 LIMIT 3")
@freshCluster()
def order_by_multikey_test(self):
""" Test for #4612 bug and more generaly order by when multiple C* rows are queried """
cursor = self.prepare(ordered=True)
cursor.execute("""
CREATE TABLE test(
my_id varchar,
col1 int,
col2 int,
value varchar,
PRIMARY KEY (my_id, col1, col2)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.default_fetch_size = None
cursor.execute("INSERT INTO test(my_id, col1, col2, value) VALUES ( 'key1', 1, 1, 'a');")
cursor.execute("INSERT INTO test(my_id, col1, col2, value) VALUES ( 'key2', 3, 3, 'a');")
cursor.execute("INSERT INTO test(my_id, col1, col2, value) VALUES ( 'key3', 2, 2, 'b');")
cursor.execute("INSERT INTO test(my_id, col1, col2, value) VALUES ( 'key4', 2, 1, 'b');")
query = "SELECT col1 FROM test WHERE my_id in('key1', 'key2', 'key3') ORDER BY col1;"
assert_all(cursor, query, [[1], [2], [3]])
query = "SELECT col1, value, my_id, col2 FROM test WHERE my_id in('key3', 'key4') ORDER BY col1, col2;"
assert_all(cursor, query, [[2, 'b', 'key4', 1], [2, 'b', 'key3', 2]])
assert_invalid(cursor, "SELECT col1 FROM test ORDER BY col1;")
assert_invalid(cursor, "SELECT col1 FROM test WHERE my_id > 'key1' ORDER BY col1;")
def remove_range_slice_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v int
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
for i in range(0, 3):
cursor.execute("INSERT INTO test (k, v) VALUES (%d, %d)" % (i, i))
cursor.execute("DELETE FROM test WHERE k = 1")
assert_all(cursor, "SELECT * FROM test", [[0, 0], [2, 2]])
def indexes_composite_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
blog_id int,
timestamp int,
author text,
content text,
PRIMARY KEY (blog_id, timestamp)
)
""")
cursor.execute("CREATE INDEX ON test(author)")
time.sleep(1)
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
req = "INSERT INTO test (blog_id, timestamp, author, content) VALUES (%d, %d, '%s', '%s')"
cursor.execute(req % (0, 0, "bob", "1st post"))
cursor.execute(req % (0, 1, "tom", "2nd post"))
cursor.execute(req % (0, 2, "bob", "3rd post"))
cursor.execute(req % (0, 3, "tom", "4nd post"))
cursor.execute(req % (1, 0, "bob", "5th post"))
query = "SELECT blog_id, timestamp FROM test WHERE author = 'bob'"
assert_all(cursor, query, [[1, 0], [0, 0], [0, 2]])
cursor.execute(req % (1, 1, "tom", "6th post"))
cursor.execute(req % (1, 2, "tom", "7th post"))
cursor.execute(req % (1, 3, "bob", "8th post"))
query = "SELECT blog_id, timestamp FROM test WHERE author = 'bob'"
assert_all(cursor, query, [[1, 0], [1, 3], [0, 0], [0, 2]])
cursor.execute("DELETE FROM test WHERE blog_id = 0 AND timestamp = 2")
query = "SELECT blog_id, timestamp FROM test WHERE author = 'bob'"
assert_all(cursor, query, [[1, 0], [1, 3], [0, 0]])
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12364',
flaky=True)
def refuse_in_with_indexes_test(self):
""" Test for the validation bug of #4709 """
cursor = self.prepare()
cursor.execute("create table t1 (pk varchar primary key, col1 varchar, col2 varchar);")
cursor.execute("create index t1_c1 on t1(col1);")
cursor.execute("create index t1_c2 on t1(col2);")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE t1")
cursor.execute("insert into t1 (pk, col1, col2) values ('pk1','foo1','bar1');")
cursor.execute("insert into t1 (pk, col1, col2) values ('pk1a','foo1','bar1');")
cursor.execute("insert into t1 (pk, col1, col2) values ('pk1b','foo1','bar1');")
cursor.execute("insert into t1 (pk, col1, col2) values ('pk1c','foo1','bar1');")
cursor.execute("insert into t1 (pk, col1, col2) values ('pk2','foo2','bar2');")
cursor.execute("insert into t1 (pk, col1, col2) values ('pk3','foo3','bar3');")
assert_invalid(cursor, "select * from t1 where col2 in ('bar1', 'bar2');")
def reversed_compact_test(self):
""" Test for #4716 bug and more generally for good behavior of ordering"""
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test1 (
k text,
c int,
v int,
PRIMARY KEY (k, c)
) WITH COMPACT STORAGE
AND CLUSTERING ORDER BY (c DESC);
""")
cursor.execute("""
CREATE TABLE test2 (
k text,
c int,
v int,
PRIMARY KEY (k, c)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test1")
cursor.execute("TRUNCATE test2")
for i in range(0, 10):
cursor.execute("INSERT INTO test1(k, c, v) VALUES ('foo', %s, %s)", (i, i))
query = "SELECT c FROM test1 WHERE c > 2 AND c < 6 AND k = 'foo'"
assert_all(cursor, query, [[5], [4], [3]])
query = "SELECT c FROM test1 WHERE c >= 2 AND c <= 6 AND k = 'foo'"
assert_all(cursor, query, [[6], [5], [4], [3], [2]])
query = "SELECT c FROM test1 WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c ASC"
assert_all(cursor, query, [[3], [4], [5]])
query = "SELECT c FROM test1 WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c ASC"
assert_all(cursor, query, [[2], [3], [4], [5], [6]])
query = "SELECT c FROM test1 WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c DESC"
assert_all(cursor, query, [[5], [4], [3]])
query = "SELECT c FROM test1 WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c DESC"
assert_all(cursor, query, [[6], [5], [4], [3], [2]])
for i in range(0, 10):
cursor.execute("INSERT INTO test2(k, c, v) VALUES ('foo', %s, %s)", (i, i))
query = "SELECT c FROM test2 WHERE c > 2 AND c < 6 AND k = 'foo'"
assert_all(cursor, query, [[3], [4], [5]])
query = "SELECT c FROM test2 WHERE c >= 2 AND c <= 6 AND k = 'foo'"
assert_all(cursor, query, [[2], [3], [4], [5], [6]])
query = "SELECT c FROM test2 WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c ASC"
assert_all(cursor, query, [[3], [4], [5]])
query = "SELECT c FROM test2 WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c ASC"
assert_all(cursor, query, [[2], [3], [4], [5], [6]])
query = "SELECT c FROM test2 WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c DESC"
assert_all(cursor, query, [[5], [4], [3]])
query = "SELECT c FROM test2 WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c DESC"
assert_all(cursor, query, [[6], [5], [4], [3], [2]])
def reversed_compact_multikey_test(self):
""" Test for the bug from #4760 and #4759 """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
key text,
c1 int,
c2 int,
value text,
PRIMARY KEY(key, c1, c2)
) WITH COMPACT STORAGE
AND CLUSTERING ORDER BY(c1 DESC, c2 DESC);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
for i in range(0, 3):
for j in range(0, 3):
cursor.execute("INSERT INTO test(key, c1, c2, value) VALUES ('foo', %i, %i, 'bar');" % (i, j))
# Equalities
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 = 1"
assert_all(cursor, query, [[1, 2], [1, 1], [1, 0]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 = 1 ORDER BY c1 ASC, c2 ASC"
assert_all(cursor, query, [[1, 0], [1, 1], [1, 2]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 = 1 ORDER BY c1 DESC, c2 DESC"
assert_all(cursor, query, [[1, 2], [1, 1], [1, 0]])
# GT
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 > 1"
assert_all(cursor, query, [[2, 2], [2, 1], [2, 0]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 > 1 ORDER BY c1 ASC, c2 ASC"
assert_all(cursor, query, [[2, 0], [2, 1], [2, 2]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 > 1 ORDER BY c1 DESC, c2 DESC"
assert_all(cursor, query, [[2, 2], [2, 1], [2, 0]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 >= 1"
assert_all(cursor, query, [[2, 2], [2, 1], [2, 0], [1, 2], [1, 1], [1, 0]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 >= 1 ORDER BY c1 ASC, c2 ASC"
assert_all(cursor, query, [[1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 >= 1 ORDER BY c1 ASC"
assert_all(cursor, query, [[1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 >= 1 ORDER BY c1 DESC, c2 DESC"
assert_all(cursor, query, [[2, 2], [2, 1], [2, 0], [1, 2], [1, 1], [1, 0]])
# LT
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 < 1"
assert_all(cursor, query, [[0, 2], [0, 1], [0, 0]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 < 1 ORDER BY c1 ASC, c2 ASC"
assert_all(cursor, query, [[0, 0], [0, 1], [0, 2]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 < 1 ORDER BY c1 DESC, c2 DESC"
assert_all(cursor, query, [[0, 2], [0, 1], [0, 0]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 <= 1"
assert_all(cursor, query, [[1, 2], [1, 1], [1, 0], [0, 2], [0, 1], [0, 0]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 <= 1 ORDER BY c1 ASC, c2 ASC"
assert_all(cursor, query, [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 <= 1 ORDER BY c1 ASC"
assert_all(cursor, query, [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 <= 1 ORDER BY c1 DESC, c2 DESC"
assert_all(cursor, query, [[1, 2], [1, 1], [1, 0], [0, 2], [0, 1], [0, 0]])
def collection_and_regular_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
l list<int>,
c int
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, l, c) VALUES(3, [0, 1, 2], 4)")
cursor.execute("UPDATE test SET l[0] = 1, c = 42 WHERE k = 3")
assert_one(cursor, "SELECT l, c FROM test WHERE k = 3", [[1, 1, 2], 42])
def batch_and_list_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
l list<int>
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("""
BEGIN BATCH
UPDATE test SET l = l + [ 1 ] WHERE k = 0;
UPDATE test SET l = l + [ 2 ] WHERE k = 0;
UPDATE test SET l = l + [ 3 ] WHERE k = 0;
APPLY BATCH
""")
assert_one(cursor, "SELECT l FROM test WHERE k = 0", [[1, 2, 3]])
cursor.execute("""
BEGIN BATCH
UPDATE test SET l = [ 1 ] + l WHERE k = 1;
UPDATE test SET l = [ 2 ] + l WHERE k = 1;
UPDATE test SET l = [ 3 ] + l WHERE k = 1;
APPLY BATCH
""")
assert_one(cursor, "SELECT l FROM test WHERE k = 1", [[3, 2, 1]])
def boolean_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k boolean PRIMARY KEY,
b boolean
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, b) VALUES (true, false)")
assert_one(cursor, "SELECT * FROM test WHERE k = true", [True, False])
def multiordering_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k text,
c1 int,
c2 int,
PRIMARY KEY (k, c1, c2)
) WITH CLUSTERING ORDER BY (c1 ASC, c2 DESC);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
for i in range(0, 2):
for j in range(0, 2):
cursor.execute("INSERT INTO test(k, c1, c2) VALUES ('foo', %i, %i)" % (i, j))
query = "SELECT c1, c2 FROM test WHERE k = 'foo'"
assert_all(cursor, query, [[0, 1], [0, 0], [1, 1], [1, 0]])
query = "SELECT c1, c2 FROM test WHERE k = 'foo' ORDER BY c1 ASC, c2 DESC"
assert_all(cursor, query, [[0, 1], [0, 0], [1, 1], [1, 0]])
query = "SELECT c1, c2 FROM test WHERE k = 'foo' ORDER BY c1 DESC, c2 ASC"
assert_all(cursor, query, [[1, 0], [1, 1], [0, 0], [0, 1]])
assert_invalid(cursor, "SELECT c1, c2 FROM test WHERE k = 'foo' ORDER BY c2 DESC")
assert_invalid(cursor, "SELECT c1, c2 FROM test WHERE k = 'foo' ORDER BY c2 ASC")
assert_invalid(cursor, "SELECT c1, c2 FROM test WHERE k = 'foo' ORDER BY c1 ASC, c2 ASC")
def bug_4882_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
c1 int,
c2 int,
v int,
PRIMARY KEY (k, c1, c2)
) WITH CLUSTERING ORDER BY (c1 ASC, c2 DESC);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, c1, c2, v) VALUES (0, 0, 0, 0);")
cursor.execute("INSERT INTO test (k, c1, c2, v) VALUES (0, 1, 1, 1);")
cursor.execute("INSERT INTO test (k, c1, c2, v) VALUES (0, 0, 2, 2);")
cursor.execute("INSERT INTO test (k, c1, c2, v) VALUES (0, 1, 3, 3);")
query = "SELECT * FROM test WHERE k = 0 LIMIT 1;"
assert_one(cursor, query, [0, 0, 2, 2])
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12401',
flaky=False)
def multi_list_set_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
l1 list<int>,
l2 list<int>
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, l1, l2) VALUES (0, [1, 2, 3], [4, 5, 6])")
cursor.execute("UPDATE test SET l2[1] = 42, l1[1] = 24 WHERE k = 0")
assert_one(cursor, "SELECT l1, l2 FROM test WHERE k = 0", [[1, 24, 3], [4, 42, 6]])
@freshCluster()
def composite_index_collections_test(self):
cursor = self.prepare(ordered=True)
cursor.execute("""
CREATE TABLE blogs (
blog_id int,
time1 int,
time2 int,
author text,
content set<text>,
PRIMARY KEY (blog_id, time1, time2)
)
""")
cursor.execute("CREATE INDEX ON blogs(author)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE blogs")
req = "INSERT INTO blogs (blog_id, time1, time2, author, content) VALUES (%d, %d, %d, '%s', %s)"
cursor.execute(req % (1, 0, 0, 'foo', "{ 'bar1', 'bar2' }"))
cursor.execute(req % (1, 0, 1, 'foo', "{ 'bar2', 'bar3' }"))
cursor.execute(req % (2, 1, 0, 'foo', "{ 'baz' }"))
cursor.execute(req % (3, 0, 1, 'gux', "{ 'qux' }"))
query = "SELECT blog_id, content FROM blogs WHERE author='foo'"
assert_all(cursor, query, [[1, set(['bar1', 'bar2'])], [1, set(['bar2', 'bar3'])], [2, set(['baz'])]])
@freshCluster()
def truncate_clean_cache_test(self):
cursor = self.prepare(ordered=True, use_cache=True)
if self.node_version_above('2.1'):
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v1 int,
v2 int,
) WITH caching = {'keys': 'NONE', 'rows_per_partition': 'ALL'};
""")
else:
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v1 int,
v2 int,
) WITH CACHING = ALL;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
for i in range(0, 3):
cursor.execute("INSERT INTO test(k, v1, v2) VALUES (%d, %d, %d)" % (i, i, i * 2))
query = "SELECT v1, v2 FROM test WHERE k IN (0, 1, 2)"
assert_all(cursor, query, [[0, 0], [1, 2], [2, 4]])
cursor.execute("TRUNCATE test")
query = "SELECT v1, v2 FROM test WHERE k IN (0, 1, 2)"
assert_none(cursor, query)
def range_with_deletes_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v int,
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
nb_keys = 30
nb_deletes = 5
for i in range(0, nb_keys):
cursor.execute("INSERT INTO test(k, v) VALUES ({}, {})".format(i, i))
for i in random.sample(xrange(nb_keys), nb_deletes):
cursor.execute("DELETE FROM test WHERE k = {}".format(i))
res = list(cursor.execute("SELECT * FROM test LIMIT {}".format(nb_keys / 2)))
assert_length_equal(res, nb_keys / 2)
def collection_function_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
l set<int>
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
assert_invalid(cursor, "SELECT ttl(l) FROM test WHERE k = 0")
assert_invalid(cursor, "SELECT writetime(l) FROM test WHERE k = 0")
def composite_partition_key_validation_test(self):
""" Test for bug from #5122 """
cursor = self.prepare()
cursor.execute("CREATE TABLE foo (a int, b text, c uuid, PRIMARY KEY ((a, b)));")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE foo")
cursor.execute("INSERT INTO foo (a, b , c ) VALUES ( 1 , 'aze', 4d481800-4c5f-11e1-82e0-3f484de45426)")
cursor.execute("INSERT INTO foo (a, b , c ) VALUES ( 1 , 'ert', 693f5800-8acb-11e3-82e0-3f484de45426)")
cursor.execute("INSERT INTO foo (a, b , c ) VALUES ( 1 , 'opl', d4815800-2d8d-11e0-82e0-3f484de45426)")
assert_row_count(cursor, 'foo', 3)
assert_invalid(cursor, "SELECT * FROM foo WHERE a=1")
@since('2.2')
def multi_in_test(self):
self.__multi_in(False)
@since('2.2')
def multi_in_compact_test(self):
self.__multi_in(True)
def __multi_in(self, compact):
cursor = self.prepare()
data = [
('test', '06029', 'CT', 9, 'Ellington'),
('test', '06031', 'CT', 9, 'Falls Village'),
('test', '06902', 'CT', 9, 'Stamford'),
('test', '06927', 'CT', 9, 'Stamford'),
('test', '10015', 'NY', 36, 'New York'),
('test', '07182', 'NJ', 34, 'Newark'),
('test', '73301', 'TX', 48, 'Austin'),
('test', '94102', 'CA', 6, 'San Francisco'),
('test2', '06029', 'CT', 9, 'Ellington'),
('test2', '06031', 'CT', 9, 'Falls Village'),
('test2', '06902', 'CT', 9, 'Stamford'),
('test2', '06927', 'CT', 9, 'Stamford'),
('test2', '10015', 'NY', 36, 'New York'),
('test2', '07182', 'NJ', 34, 'Newark'),
('test2', '73301', 'TX', 48, 'Austin'),
('test2', '94102', 'CA', 6, 'San Francisco'),
]
create = """
CREATE TABLE zipcodes (
group text,
zipcode text,
state text,
fips_regions int,
city text,
PRIMARY KEY(group,zipcode,state,fips_regions)
)"""
if compact:
create = create + " WITH COMPACT STORAGE"
cursor.execute(create)
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE zipcodes")
for d in data:
cursor.execute("INSERT INTO zipcodes (group, zipcode, state, fips_regions, city) VALUES ('%s', '%s', '%s', %s, '%s')" % d)
res = list(cursor.execute("select zipcode from zipcodes"))
assert_length_equal(res, 16)
res = list(cursor.execute("select zipcode from zipcodes where group='test'"))
assert_length_equal(res, 8)
assert_invalid(cursor, "select zipcode from zipcodes where zipcode='06902'")
res = list(cursor.execute("select zipcode from zipcodes where zipcode='06902' ALLOW FILTERING"))
assert_length_equal(res, 2)
res = list(cursor.execute("select zipcode from zipcodes where group='test' and zipcode='06902'"))
assert_length_equal(res, 1)
if is_upgraded:
# the coordinator is the upgraded 2.2+ node
res = list(cursor.execute("select zipcode from zipcodes where group='test' and zipcode IN ('06902','73301','94102')"))
assert_length_equal(res, 3)
res = list(cursor.execute("select zipcode from zipcodes where group='test' AND zipcode IN ('06902','73301','94102') and state IN ('CT','CA')"))
assert_length_equal(res, 2)
res = list(cursor.execute("select zipcode from zipcodes where group='test' AND zipcode IN ('06902','73301','94102') and state IN ('CT','CA') and fips_regions = 9"))
assert_length_equal(res, 1)
res = list(cursor.execute("select zipcode from zipcodes where group='test' AND zipcode IN ('06902','73301','94102') and state IN ('CT','CA') ORDER BY zipcode DESC"))
assert_length_equal(res, 2)
res = list(cursor.execute("select zipcode from zipcodes where group='test' AND zipcode IN ('06902','73301','94102') and state IN ('CT','CA') and fips_regions > 0"))
assert_length_equal(res, 2)
assert_none(cursor, "select zipcode from zipcodes where group='test' AND zipcode IN ('06902','73301','94102') and state IN ('CT','CA') and fips_regions < 0")
@since('2.2')
def multi_in_compact_non_composite_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
key int,
c int,
v int,
PRIMARY KEY (key, c)
) WITH COMPACT STORAGE
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (key, c, v) VALUES (0, 0, 0)")
cursor.execute("INSERT INTO test (key, c, v) VALUES (0, 1, 1)")
cursor.execute("INSERT INTO test (key, c, v) VALUES (0, 2, 2)")
query = "SELECT * FROM test WHERE key=0 AND c IN (0, 2)"
assert_all(cursor, query, [[0, 0, 0], [0, 2, 2]])
def large_clustering_in_test(self):
# Test for CASSANDRA-8410
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
c int,
v int,
PRIMARY KEY (k, c)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
insert_statement = cursor.prepare("INSERT INTO test (k, c, v) VALUES (?, ?, ?)")
cursor.execute(insert_statement, (0, 0, 0))
select_statement = cursor.prepare("SELECT * FROM test WHERE k=? AND c IN ?")
in_values = list(range(10000))
# try to fetch one existing row and 9999 non-existing rows
rows = list(cursor.execute(select_statement, [0, in_values]))
assert_length_equal(rows, 1)
self.assertEqual((0, 0, 0), rows[0])
# insert approximately 1000 random rows between 0 and 10k
clustering_values = set([random.randint(0, 9999) for _ in range(1000)])
clustering_values.add(0)
args = [(0, i, i) for i in clustering_values]
execute_concurrent_with_args(cursor, insert_statement, args)
rows = list(cursor.execute(select_statement, [0, in_values]))
assert_length_equal(rows, len(clustering_values))
def timeuuid_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
t timeuuid,
PRIMARY KEY (k, t)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
assert_invalid(cursor, "INSERT INTO test (k, t) VALUES (0, 2012-11-07 18:18:22-0800)", expected=SyntaxException)
for i in range(4):
cursor.execute("INSERT INTO test (k, t) VALUES (0, now())")
time.sleep(1)
assert_row_count(cursor, 'test', 4)
res = list(cursor.execute("SELECT * FROM test"))
dates = [d[1] for d in res]
assert_row_count(cursor, 'test', 4, where="k = 0 AND t >= {}".format(dates[0]))
assert_row_count(cursor, 'test', 0, where="k = 0 AND t < {}".format(dates[0]))
assert_row_count(cursor, 'test', 2, where="k = 0 AND t > {} AND t <= {}".format(dates[0], dates[2]))
assert_row_count(cursor, 'test', 1, where="k = 0 AND t = {}".format(dates[0]))
assert_invalid(cursor, "SELECT dateOf(k) FROM test WHERE k = 0 AND t = %s" % dates[0])
cursor.execute("SELECT dateOf(t), unixTimestampOf(t) FROM test WHERE k = 0 AND t = %s" % dates[0])
cursor.execute("SELECT t FROM test WHERE k = 0 AND t > maxTimeuuid(1234567) AND t < minTimeuuid('2012-11-07 18:18:22-0800')")
# not sure what to check exactly, so just verify the query executes
def float_with_exponent_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
d double,
f float
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, d, f) VALUES (0, 3E+10, 3.4E3)")
cursor.execute("INSERT INTO test(k, d, f) VALUES (1, 3.E10, -23.44E-3)")
cursor.execute("INSERT INTO test(k, d, f) VALUES (2, 3, -2)")
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12194',
flaky=False)
def compact_metadata_test(self):
""" Test regression from #5189 """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE bar (
id int primary key,
i int
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE bar")
cursor.execute("INSERT INTO bar (id, i) VALUES (1, 2);")
assert_one(cursor, "SELECT * FROM bar", [1, 2])
def query_compact_tables_during_upgrade_test(self):
"""
Check that un-upgraded sstables for compact storage tables
can be read after an upgrade. Checks for a regression where,
when the coordinator is on < 3.0, a replica at >= 3.0 returns
0 results for any read request. When the >= 3.0 node is
the coordinator, the problem does not manifest. Likewise, if
the data is inserted after the replica is upgraded, or if
upgradesstables is run after upgrade, the query succeeds, so
the issue is with reading legacy format sstables in response to
a legacy format read request
@jira_ticket CASSANDRA-11087
"""
cursor = self.prepare()
cursor.execute("""
CREATE TABLE t1 (
a int PRIMARY KEY,
b int
) WITH COMPACT STORAGE;
""")
execute_concurrent_with_args(cursor,
cursor.prepare("INSERT INTO t1 (a, b) VALUES (?, ?)"),
[(i, i) for i in xrange(100)])
self.cluster.flush()
def check_read_all(cursor):
read_count = 0
# first read each row separately - obviously, we should be able to retrieve all 100
for i in xrange(100):
res = cursor.execute("SELECT * FROM t1 WHERE a = {a}".format(a=i))
read_count += len(rows_to_list(res))
debug("Querying for individual keys retrieved {c} results".format(c=read_count))
self.assertEqual(read_count, 100)
# now a range slice, again all 100 rows should be retrievable
res = rows_to_list(cursor.execute("SELECT * FROM t1"))
read_count = len(res)
debug("Range request retrieved {c} rows".format(c=read_count))
assert_length_equal(res, 100)
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {state} node".format(state="upgraded" if is_upgraded else "old"))
check_read_all(cursor)
debug("Querying upgraded node after running upgradesstables")
node1 = self.cluster.nodelist()[0]
node1.nodetool("upgradesstables -a")
check_read_all(self.patient_exclusive_cql_connection(node1, keyspace="ks"))
def clustering_indexing_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE posts (
id1 int,
id2 int,
author text,
time bigint,
v1 text,
v2 text,
PRIMARY KEY ((id1, id2), author, time)
)
""")
cursor.execute("CREATE INDEX ON posts(time)")
cursor.execute("CREATE INDEX ON posts(id2)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE posts")
cursor.execute("INSERT INTO posts(id1, id2, author, time, v1, v2) VALUES(0, 0, 'bob', 0, 'A', 'A')")
cursor.execute("INSERT INTO posts(id1, id2, author, time, v1, v2) VALUES(0, 0, 'bob', 1, 'B', 'B')")
cursor.execute("INSERT INTO posts(id1, id2, author, time, v1, v2) VALUES(0, 1, 'bob', 2, 'C', 'C')")
cursor.execute("INSERT INTO posts(id1, id2, author, time, v1, v2) VALUES(0, 0, 'tom', 0, 'D', 'D')")
cursor.execute("INSERT INTO posts(id1, id2, author, time, v1, v2) VALUES(0, 1, 'tom', 1, 'E', 'E')")
query = "SELECT v1 FROM posts WHERE time = 1"
assert_all(cursor, query, [['B'], ['E']])
query = "SELECT v1 FROM posts WHERE id2 = 1"
assert_all(cursor, query, [['C'], ['E']])
query = "SELECT v1 FROM posts WHERE id1 = 0 AND id2 = 0 AND author = 'bob' AND time = 0"
assert_one(cursor, query, ['A'])
# Test for CASSANDRA-8206
cursor.execute("UPDATE posts SET v2 = null WHERE id1 = 0 AND id2 = 0 AND author = 'bob' AND time = 1")
query = "SELECT v1 FROM posts WHERE id2 = 0"
assert_all(cursor, query, [['A'], ['B'], ['D']])
query = "SELECT v1 FROM posts WHERE time = 1"
assert_all(cursor, query, [['B'], ['E']])
def edge_2i_on_complex_pk_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE indexed (
pk0 int,
pk1 int,
ck0 int,
ck1 int,
ck2 int,
value int,
PRIMARY KEY ((pk0, pk1), ck0, ck1, ck2)
)
""")
cursor.execute("CREATE INDEX ON indexed(pk0)")
cursor.execute("CREATE INDEX ON indexed(ck0)")
cursor.execute("CREATE INDEX ON indexed(ck1)")
cursor.execute("CREATE INDEX ON indexed(ck2)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE indexed")
cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (0, 1, 2, 3, 4, 5)")
cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (1, 2, 3, 4, 5, 0)")
cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (2, 3, 4, 5, 0, 1)")
cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (3, 4, 5, 0, 1, 2)")
cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (4, 5, 0, 1, 2, 3)")
cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (5, 0, 1, 2, 3, 4)")
assert_all(cursor, "SELECT value FROM indexed WHERE pk0 = 2", [[1]])
assert_all(cursor, "SELECT value FROM indexed WHERE ck0 = 0", [[3]])
assert_all(cursor, "SELECT value FROM indexed WHERE pk0 = 3 AND pk1 = 4 AND ck1 = 0", [[2]])
assert_all(cursor, "SELECT value FROM indexed WHERE pk0 = 5 AND pk1 = 0 AND ck0 = 1 AND ck2 = 3 ALLOW FILTERING", [[4]])
def bug_5240_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test(
interval text,
seq int,
id int,
severity int,
PRIMARY KEY ((interval, seq), id)
) WITH CLUSTERING ORDER BY (id DESC);
""")
cursor.execute("CREATE INDEX ON test(severity);")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("insert into test(interval, seq, id , severity) values('t',1, 1, 1);")
cursor.execute("insert into test(interval, seq, id , severity) values('t',1, 2, 1);")
cursor.execute("insert into test(interval, seq, id , severity) values('t',1, 3, 2);")
cursor.execute("insert into test(interval, seq, id , severity) values('t',1, 4, 3);")
cursor.execute("insert into test(interval, seq, id , severity) values('t',2, 1, 3);")
cursor.execute("insert into test(interval, seq, id , severity) values('t',2, 2, 3);")
cursor.execute("insert into test(interval, seq, id , severity) values('t',2, 3, 1);")
cursor.execute("insert into test(interval, seq, id , severity) values('t',2, 4, 2);")
query = "select * from test where severity = 3 and interval = 't' and seq =1;"
assert_one(cursor, query, ['t', 1, 4, 3])
def ticket_5230_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE foo (
key text,
c text,
v text,
PRIMARY KEY (key, c)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE foo")
cursor.execute("INSERT INTO foo(key, c, v) VALUES ('foo', '1', '1')")
cursor.execute("INSERT INTO foo(key, c, v) VALUES ('foo', '2', '2')")
cursor.execute("INSERT INTO foo(key, c, v) VALUES ('foo', '3', '3')")
query = "SELECT c FROM foo WHERE key = 'foo' AND c IN ('1', '2');"
assert_all(cursor, query, [['1'], ['2']])
def conversion_functions_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
i varint,
b blob
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, i, b) VALUES (0, blobAsVarint(bigintAsBlob(3)), textAsBlob('foobar'))")
query = "SELECT i, blobAsText(b) FROM test WHERE k = 0"
assert_one(cursor, query, [3, 'foobar'])
def bug_5376_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
key text,
c bigint,
v text,
x set<text>,
PRIMARY KEY (key, c)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
assert_invalid(cursor, "select * from test where key = 'foo' and c in (1,3,4);")
def function_and_reverse_type_test(self):
""" Test for #5386 """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
c timeuuid,
v int,
PRIMARY KEY (k, c)
) WITH CLUSTERING ORDER BY (c DESC)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("INSERT INTO test (k, c, v) VALUES (0, now(), 0);")
def bug_5404_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (key text PRIMARY KEY)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
# We just want to make sure this doesn't NPE server side
assert_invalid(cursor, "select * from test where token(key) > token(int(3030343330393233)) limit 1;")
def empty_blob_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, b blob)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, b) VALUES (0, 0x)")
assert_one(cursor, "SELECT * FROM test", [0, ''])
def rename_test(self):
cursor = self.prepare(start_rpc=True)
node = self.cluster.nodelist()[0]
host, port = node.network_interfaces['thrift']
client = get_thrift_client(host, port)
client.transport.open()
cfdef = CfDef()
cfdef.keyspace = 'ks'
cfdef.name = 'test'
cfdef.column_type = 'Standard'
cfdef.comparator_type = 'CompositeType(Int32Type, Int32Type, Int32Type)'
cfdef.key_validation_class = 'UTF8Type'
cfdef.default_validation_class = 'UTF8Type'
client.set_keyspace('ks')
client.system_add_column_family(cfdef)
time.sleep(1)
cursor.execute("INSERT INTO ks.test (key, column1, column2, column3, value) VALUES ('foo', 4, 3, 2, 'bar')")
time.sleep(1)
cursor.execute("ALTER TABLE test RENAME column1 TO foo1 AND column2 TO foo2 AND column3 TO foo3")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
assert_one(cursor, "SELECT foo1, foo2, foo3 FROM test", [4, 3, 2])
def clustering_order_and_functions_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
t timeuuid,
PRIMARY KEY (k, t)
) WITH CLUSTERING ORDER BY (t DESC)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
for i in range(0, 5):
cursor.execute("INSERT INTO test (k, t) VALUES (%d, now())" % i)
cursor.execute("SELECT dateOf(t) FROM test")
def conditional_update_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v1 int,
v2 text,
v3 int
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
# Shouldn't apply
assert_one(cursor, "UPDATE test SET v1 = 3, v2 = 'bar' WHERE k = 0 IF v1 = 4", [False])
assert_one(cursor, "UPDATE test SET v1 = 3, v2 = 'bar' WHERE k = 0 IF EXISTS", [False])
# Should apply
assert_one(cursor, "INSERT INTO test (k, v1, v2) VALUES (0, 2, 'foo') IF NOT EXISTS", [True])
# Shouldn't apply
assert_one(cursor, "INSERT INTO test (k, v1, v2) VALUES (0, 5, 'bar') IF NOT EXISTS", [False, 0, 2, 'foo', None])
assert_one(cursor, "SELECT * FROM test", [0, 2, 'foo', None], cl=ConsistencyLevel.SERIAL)
# Should not apply
assert_one(cursor, "UPDATE test SET v1 = 3, v2 = 'bar' WHERE k = 0 IF v1 = 4", [False, 2])
assert_one(cursor, "SELECT * FROM test", [0, 2, 'foo', None], cl=ConsistencyLevel.SERIAL)
# Should apply (note: we want v2 before v1 in the statement order to exercise #5786)
assert_one(cursor, "UPDATE test SET v2 = 'bar', v1 = 3 WHERE k = 0 IF v1 = 2", [True])
assert_one(cursor, "UPDATE test SET v2 = 'bar', v1 = 3 WHERE k = 0 IF EXISTS", [True])
assert_one(cursor, "SELECT * FROM test", [0, 3, 'bar', None], cl=ConsistencyLevel.SERIAL)
# Shouldn't apply, only one condition is ok
assert_one(cursor, "UPDATE test SET v1 = 5, v2 = 'foobar' WHERE k = 0 IF v1 = 3 AND v2 = 'foo'", [False, 3, 'bar'])
assert_one(cursor, "SELECT * FROM test", [0, 3, 'bar', None], cl=ConsistencyLevel.SERIAL)
# Should apply
assert_one(cursor, "UPDATE test SET v1 = 5, v2 = 'foobar' WHERE k = 0 IF v1 = 3 AND v2 = 'bar'", [True])
assert_one(cursor, "SELECT * FROM test", [0, 5, 'foobar', None], cl=ConsistencyLevel.SERIAL)
# Shouldn't apply
assert_one(cursor, "DELETE v2 FROM test WHERE k = 0 IF v1 = 3", [False, 5])
assert_one(cursor, "SELECT * FROM test", [0, 5, 'foobar', None], cl=ConsistencyLevel.SERIAL)
# Shouldn't apply
assert_one(cursor, "DELETE v2 FROM test WHERE k = 0 IF v1 = null", [False, 5])
assert_one(cursor, "SELECT * FROM test", [0, 5, 'foobar', None], cl=ConsistencyLevel.SERIAL)
# Should apply
assert_one(cursor, "DELETE v2 FROM test WHERE k = 0 IF v1 = 5", [True])
assert_one(cursor, "SELECT * FROM test", [0, 5, None, None], cl=ConsistencyLevel.SERIAL)
# Shouldn't apply
assert_one(cursor, "DELETE v1 FROM test WHERE k = 0 IF v3 = 4", [False, None])
# Should apply
assert_one(cursor, "DELETE v1 FROM test WHERE k = 0 IF v3 = null", [True])
assert_one(cursor, "SELECT * FROM test", [0, None, None, None], cl=ConsistencyLevel.SERIAL)
# Should apply
assert_one(cursor, "DELETE FROM test WHERE k = 0 IF v1 = null", [True])
assert_none(cursor, "SELECT * FROM test", cl=ConsistencyLevel.SERIAL)
# Shouldn't apply
assert_one(cursor, "UPDATE test SET v1 = 3, v2 = 'bar' WHERE k = 0 IF EXISTS", [False])
if self.get_version() > "2.1.1":
# Should apply
assert_one(cursor, "DELETE FROM test WHERE k = 0 IF v1 IN (null)", [True])
@since('2.1.1')
def non_eq_conditional_update_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v1 int,
v2 text,
v3 int
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
# non-EQ conditions
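# Since 2.1.1, IF conditions may also use <, <=, >, >=, != and IN; a failed condition returns
# [False] followed by the current value of the conditioned column, as asserted below.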
cursor.execute("INSERT INTO test (k, v1, v2) VALUES (0, 2, 'foo')")
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 < 3", [True])
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 <= 3", [True])
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 > 1", [True])
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 >= 1", [True])
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 != 1", [True])
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 != 2", [False, 2])
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 IN (0, 1, 2)", [True])
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 IN (142, 276)", [False, 2])
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 IN ()", [False, 2])
def conditional_delete_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v1 int,
)
""")
# static columns
cursor.execute("""
CREATE TABLE test2 (
k text,
s text static,
i int,
v text,
PRIMARY KEY (k, i)
)""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("TRUNCATE test2")
assert_one(cursor, "DELETE FROM test WHERE k=1 IF EXISTS", [False])
assert_one(cursor, "INSERT INTO test (k, v1) VALUES (1, 2) IF NOT EXISTS", [True])
assert_one(cursor, "DELETE FROM test WHERE k=1 IF EXISTS", [True])
assert_none(cursor, "SELECT * FROM test WHERE k=1", cl=ConsistencyLevel.SERIAL)
assert_one(cursor, "DELETE FROM test WHERE k=1 IF EXISTS", [False])
assert_one(cursor, "INSERT INTO test (k, v1) VALUES (2, 2) IF NOT EXISTS USING TTL 1", [True])
time.sleep(1.5)
assert_one(cursor, "DELETE FROM test WHERE k=2 IF EXISTS", [False])
assert_none(cursor, "SELECT * FROM test WHERE k=2", cl=ConsistencyLevel.SERIAL)
assert_one(cursor, "INSERT INTO test (k, v1) VALUES (3, 2) IF NOT EXISTS", [True])
assert_one(cursor, "DELETE v1 FROM test WHERE k=3 IF EXISTS", [True])
assert_one(cursor, "SELECT * FROM test WHERE k=3", [3, None], cl=ConsistencyLevel.SERIAL)
assert_one(cursor, "DELETE v1 FROM test WHERE k=3 IF EXISTS", [True])
assert_one(cursor, "DELETE FROM test WHERE k=3 IF EXISTS", [True])
cursor.execute("INSERT INTO test2 (k, s, i, v) VALUES ('k', 's', 0, 'v') IF NOT EXISTS")
assert_one(cursor, "DELETE v FROM test2 WHERE k='k' AND i=0 IF EXISTS", [True])
assert_one(cursor, "DELETE FROM test2 WHERE k='k' AND i=0 IF EXISTS", [True])
assert_one(cursor, "DELETE v FROM test2 WHERE k='k' AND i=0 IF EXISTS", [False])
assert_one(cursor, "DELETE FROM test2 WHERE k='k' AND i=0 IF EXISTS", [False])
# CASSANDRA-6430
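# CASSANDRA-6430: conditional deletes must restrict the complete primary key; partial-key or
# range restrictions combined with IF are rejected on 2.0.11+ / 2.1.1+ coordinators.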
v = self.get_version()
if v >= "2.1.1" or v < "2.1" and v >= "2.0.11":
assert_invalid(cursor, "DELETE FROM test2 WHERE k = 'k' IF EXISTS")
assert_invalid(cursor, "DELETE FROM test2 WHERE k = 'k' IF v = 'foo'")
assert_invalid(cursor, "DELETE FROM test2 WHERE i = 0 IF EXISTS")
assert_invalid(cursor, "DELETE FROM test2 WHERE k = 0 AND i > 0 IF EXISTS")
assert_invalid(cursor, "DELETE FROM test2 WHERE k = 0 AND i > 0 IF v = 'foo'")
@freshCluster()
def range_key_ordered_test(self):
cursor = self.prepare(ordered=True)
cursor.execute("CREATE TABLE test ( k int PRIMARY KEY)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k) VALUES (-1)")
cursor.execute("INSERT INTO test(k) VALUES ( 0)")
cursor.execute("INSERT INTO test(k) VALUES ( 1)")
assert_all(cursor, "SELECT * FROM test", [[0], [1], [-1]])
assert_invalid(cursor, "SELECT * FROM test WHERE k >= -1 AND k < 1;")
def select_with_alias_test(self):
cursor = self.prepare()
cursor.execute('CREATE TABLE users (id int PRIMARY KEY, name text)')
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE users")
for id in range(0, 5):
cursor.execute("INSERT INTO users (id, name) VALUES ({}, 'name{}') USING TTL 10 AND TIMESTAMP 0".format(id, id))
# test aliasing count(*)
res = cursor.execute('SELECT count(*) AS user_count FROM users')
self.assertEqual('user_count', res[0]._fields[0])
self.assertEqual(5, res[0].user_count)
# test aliasing regular value
res = cursor.execute('SELECT name AS user_name FROM users WHERE id = 0')
self.assertEqual('user_name', res[0]._fields[0])
self.assertEqual('name0', res[0].user_name)
# test aliasing writetime
res = cursor.execute('SELECT writeTime(name) AS name_writetime FROM users WHERE id = 0')
self.assertEqual('name_writetime', res[0]._fields[0])
self.assertEqual(0, res[0].name_writetime)
# test aliasing ttl
res = cursor.execute('SELECT ttl(name) AS name_ttl FROM users WHERE id = 0')
self.assertEqual('name_ttl', res[0]._fields[0])
self.assertIn(res[0].name_ttl, (9, 10))
# test aliasing a regular function
res = cursor.execute('SELECT intAsBlob(id) AS id_blob FROM users WHERE id = 0')
self.assertEqual('id_blob', res[0]._fields[0])
self.assertEqual('\x00\x00\x00\x00', res[0].id_blob)
if LooseVersion(self.get_node_version(is_upgraded)) < LooseVersion('3.8'):
error_msg = "Aliases aren't allowed in the where clause"
else:
error_msg = "Undefined column name"
# test that select throws a meaningful exception for aliases in where clause
assert_invalid(cursor, 'SELECT id AS user_id, name AS user_name FROM users WHERE user_id = 0', matching=error_msg)
if LooseVersion(self.get_node_version(is_upgraded)) < LooseVersion('3.8'):
error_msg = "Aliases are not allowed in order by clause"
# test that select throws a meaningful exception for aliases in order by clause
assert_invalid(cursor, 'SELECT id AS user_id, name AS user_name FROM users WHERE id IN (0) ORDER BY user_name', matching=error_msg)
def nonpure_function_collection_test(self):
""" Test for bug #5795 """
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v list<timeuuid>)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
# we just want to make sure this doesn't throw
cursor.execute("INSERT INTO test(k, v) VALUES (0, [now()])")
def empty_in_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k1 int, k2 int, v int, PRIMARY KEY (k1, k2))")
# Same test, but for compact
cursor.execute("CREATE TABLE test_compact (k1 int, k2 int, v int, PRIMARY KEY (k1, k2)) WITH COMPACT STORAGE")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("TRUNCATE test_compact")
def fill(table):
for i in range(0, 2):
for j in range(0, 2):
cursor.execute("INSERT INTO %s (k1, k2, v) VALUES (%d, %d, %d)" % (table, i, j, i + j))
def assert_nothing_changed(table):
assert_all(cursor, "SELECT * FROM {}".format(table), [[1, 0, 1], [1, 1, 2], [0, 0, 0], [0, 1, 1]])
# Insert a few rows first so that the empty IN () queries below demonstrably match nothing
fill("test")
# Test empty IN () in SELECT
assert_none(cursor, "SELECT v FROM test WHERE k1 IN ()")
assert_none(cursor, "SELECT v FROM test WHERE k1 = 0 AND k2 IN ()")
# Test empty IN () in DELETE
cursor.execute("DELETE FROM test WHERE k1 IN ()")
assert_nothing_changed("test")
# Test empty IN () in UPDATE
cursor.execute("UPDATE test SET v = 3 WHERE k1 IN () AND k2 = 2")
assert_nothing_changed("test")
fill("test_compact")
assert_none(cursor, "SELECT v FROM test_compact WHERE k1 IN ()")
assert_none(cursor, "SELECT v FROM test_compact WHERE k1 = 0 AND k2 IN ()")
# Test empty IN () in DELETE
cursor.execute("DELETE FROM test_compact WHERE k1 IN ()")
assert_nothing_changed("test_compact")
# Test empty IN () in UPDATE
cursor.execute("UPDATE test_compact SET v = 3 WHERE k1 IN () AND k2 = 2")
assert_nothing_changed("test_compact")
def collection_flush_test(self):
""" Test for 5805 bug """
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, s set<int>)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, s) VALUES (1, {1})")
self.cluster.flush()
cursor.execute("INSERT INTO test(k, s) VALUES (1, {2})")
self.cluster.flush()
assert_one(cursor, "SELECT * FROM test", [1, set([2])])
def select_distinct_test(self):
cursor = self.prepare(ordered=True)
# Test a regular (CQL3) table.
cursor.execute('CREATE TABLE regular (pk0 int, pk1 int, ck0 int, val int, PRIMARY KEY((pk0, pk1), ck0))')
# Test a 'compact storage' table.
cursor.execute('CREATE TABLE compact (pk0 int, pk1 int, val int, PRIMARY KEY((pk0, pk1))) WITH COMPACT STORAGE')
# Test a 'wide row' thrift table.
cursor.execute('CREATE TABLE wide (pk int, name text, val int, PRIMARY KEY(pk, name)) WITH COMPACT STORAGE')
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE regular")
cursor.execute("TRUNCATE compact")
cursor.execute("TRUNCATE wide")
for i in xrange(0, 3):
cursor.execute('INSERT INTO regular (pk0, pk1, ck0, val) VALUES (%d, %d, 0, 0)' % (i, i))
cursor.execute('INSERT INTO regular (pk0, pk1, ck0, val) VALUES (%d, %d, 1, 1)' % (i, i))
assert_all(cursor, 'SELECT DISTINCT pk0, pk1 FROM regular LIMIT 1', [[0, 0]])
assert_all(cursor, 'SELECT DISTINCT pk0, pk1 FROM regular LIMIT 3', [[0, 0], [1, 1], [2, 2]])
for i in xrange(0, 3):
cursor.execute('INSERT INTO compact (pk0, pk1, val) VALUES (%d, %d, %d)' % (i, i, i))
assert_all(cursor, 'SELECT DISTINCT pk0, pk1 FROM compact LIMIT 1', [[0, 0]])
assert_all(cursor, 'SELECT DISTINCT pk0, pk1 FROM compact LIMIT 3', [[0, 0], [1, 1], [2, 2]])
for i in xrange(0, 3):
cursor.execute("INSERT INTO wide (pk, name, val) VALUES (%d, 'name0', 0)" % i)
cursor.execute("INSERT INTO wide (pk, name, val) VALUES (%d, 'name1', 1)" % i)
assert_all(cursor, 'SELECT DISTINCT pk FROM wide LIMIT 1', [[0]])
assert_all(cursor, 'SELECT DISTINCT pk FROM wide LIMIT 3', [[0], [1], [2]])
# Test selection validation.
assert_invalid(cursor, 'SELECT DISTINCT pk0 FROM regular', matching="queries must request all the partition key columns")
assert_invalid(cursor, 'SELECT DISTINCT pk0, pk1, ck0 FROM regular', matching="queries must only request partition key columns")
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11126',
flaky=False)
def select_distinct_with_deletions_test(self):
cursor = self.prepare()
cursor.execute('CREATE TABLE t1 (k int PRIMARY KEY, c int, v int)')
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE t1")
for i in range(10):
cursor.execute('INSERT INTO t1 (k, c, v) VALUES (%d, %d, %d)' % (i, i, i))
rows = list(cursor.execute('SELECT DISTINCT k FROM t1'))
assert_length_equal(rows, 10)
key_to_delete = rows[3].k
cursor.execute('DELETE FROM t1 WHERE k=%d' % (key_to_delete,))
rows = list(cursor.execute('SELECT DISTINCT k FROM t1'))
assert_length_equal(rows, 9)
rows = list(cursor.execute('SELECT DISTINCT k FROM t1 LIMIT 5'))
assert_length_equal(rows, 5)
cursor.default_fetch_size = 5
rows = list(cursor.execute('SELECT DISTINCT k FROM t1'))
assert_length_equal(rows, 9)
def function_with_null_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
t timeuuid
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k) VALUES (0)")
assert_one(cursor, "SELECT dateOf(t) FROM test WHERE k=0", [None])
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12260',
flaky=False)
@freshCluster()
def cas_simple_test(self):
# cursor = self.prepare(nodes=3, rf=3)
cursor = self.prepare()
cursor.execute("CREATE TABLE tkns (tkn int, consumed boolean, PRIMARY KEY (tkn));")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE tkns")
for i in range(1, 10):
query = SimpleStatement("INSERT INTO tkns (tkn, consumed) VALUES ({},FALSE);".format(i), consistency_level=ConsistencyLevel.QUORUM)
cursor.execute(query)
assert_one(cursor, "UPDATE tkns SET consumed = TRUE WHERE tkn = {} IF consumed = FALSE;".format(i), [True], cl=ConsistencyLevel.QUORUM)
assert_one(cursor, "UPDATE tkns SET consumed = TRUE WHERE tkn = {} IF consumed = FALSE;".format(i), [False, True], cl=ConsistencyLevel.QUORUM)
def bug_6050_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
a int,
b int
)
""")
cursor.execute("CREATE INDEX ON test(a)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
assert_invalid(cursor, "SELECT * FROM test WHERE a = 3 AND b IN (1, 3)")
def bug_6069_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
s set<int>
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
assert_one(cursor, "INSERT INTO test(k, s) VALUES (0, {1, 2, 3}) IF NOT EXISTS", [True])
assert_one(cursor, "SELECT * FROM test", [0, {1, 2, 3}], cl=ConsistencyLevel.SERIAL)
def bug_6115_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int, v int, PRIMARY KEY (k, v))")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES (0, 1)")
cursor.execute("BEGIN BATCH DELETE FROM test WHERE k=0 AND v=1; INSERT INTO test (k, v) VALUES (0, 2); APPLY BATCH")
assert_one(cursor, "SELECT * FROM test", [0, 2])
def column_name_validation_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k text,
c int,
v timeuuid,
PRIMARY KEY (k, c)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
assert_invalid(cursor, "INSERT INTO test(k, c) VALUES ('', 0)")
# Insert a value that doesn't fit 'int'
assert_invalid(cursor, "INSERT INTO test(k, c) VALUES (0, 10000000000)")
# Insert a non-version 1 uuid
assert_invalid(cursor, "INSERT INTO test(k, c, v) VALUES (0, 0, 550e8400-e29b-41d4-a716-446655440000)")
@since('2.1')
def user_types_test(self):
cursor = self.prepare()
userID_1 = uuid4()
stmt = """
CREATE TYPE address (
street text,
city text,
zip_code int,
phones set<text>
)
"""
cursor.execute(stmt)
stmt = """
CREATE TYPE fullname (
firstname text,
lastname text
)
"""
cursor.execute(stmt)
stmt = """
CREATE TABLE users (
id uuid PRIMARY KEY,
name frozen<fullname>,
addresses map<text, frozen<address>>
)
"""
cursor.execute(stmt)
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE users")
stmt = """
INSERT INTO users (id, name)
VALUES ({id}, {{ firstname: 'Paul', lastname: 'smith'}});
""".format(id=userID_1)
cursor.execute(stmt)
stmt = """
SELECT name.firstname FROM users WHERE id = {id}
""".format(id=userID_1)
assert_one(cursor, stmt, ['Paul'])
assert_one(cursor, "SELECT name.firstname FROM users WHERE id = {id}".format(id=userID_1), ['Paul'])
stmt = """
UPDATE users
SET addresses = addresses + {{ 'home': {{ street: '...', city: 'SF', zip_code: 94102, phones: {{}} }} }}
WHERE id={id};
""".format(id=userID_1)
cursor.execute(stmt)
stmt = """
SELECT addresses FROM users WHERE id = {id}
""".format(id=userID_1)
# TODO: deserialize the value here and check it's right.
@since('2.1')
def more_user_types_test(self):
""" user type test that does a little more nesting"""
cursor = self.prepare()
cursor.execute("""
CREATE TYPE type1 (
s set<text>,
m map<text, text>,
l list<text>
)
""")
cursor.execute("""
CREATE TYPE type2 (
s set<frozen<type1>>,
)
""")
cursor.execute("""
CREATE TABLE test (id int PRIMARY KEY, val frozen<type2>)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(id, val) VALUES (0, { s : {{ s : {'foo', 'bar'}, m : { 'foo' : 'bar' }, l : ['foo', 'bar']} }})")
# TODO: check result once we have an easy way to do it. For now we just check it doesn't crash
cursor.execute("SELECT * FROM test")
def bug_6327_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
v int,
PRIMARY KEY (k, v)
)
""")
cursor.execute("""
CREATE TABLE test2 (
k int,
v int,
c1 int,
c2 int,
PRIMARY KEY (k, v)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES (0, 0)")
self.cluster.flush()
assert_one(cursor, "SELECT v FROM test WHERE k=0 AND v IN (1, 0)", [0])
assert_one(cursor, "SELECT v FROM test WHERE v IN (1, 0) ALLOW FILTERING", [0])
cursor.execute("INSERT INTO test2 (k, v) VALUES (0, 0)")
self.cluster.flush()
assert_one(cursor, "SELECT v FROM test2 WHERE k=0 AND v IN (1, 0)", [0])
assert_one(cursor, "SELECT v FROM test2 WHERE v IN (1, 0) ALLOW FILTERING", [0])
cursor.execute("DELETE FROM test2 WHERE k = 0")
cursor.execute("UPDATE test2 SET c2 = 1 WHERE k = 0 AND v = 0")
assert_one(cursor, "SELECT v FROM test2 WHERE k=0 AND v IN (1, 0)", [0])
cursor.execute("DELETE c2 FROM test2 WHERE k = 0 AND v = 0")
assert_none(cursor, "SELECT v FROM test2 WHERE k=0 AND v IN (1, 0)")
assert_none(cursor, "SELECT v FROM test2 WHERE v IN (1, 0) ALLOW FILTERING")
def large_count_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
v int,
PRIMARY KEY (k)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.default_fetch_size = 10000
# We know we page at 10K, so test counting just before, at 10K, just after and
# a bit after that.
insert_statement = cursor.prepare("INSERT INTO test(k) VALUES (?)")
execute_concurrent_with_args(cursor, insert_statement, [(i,) for i in range(1, 10000)])
assert_one(cursor, "SELECT COUNT(*) FROM test", [9999])
cursor.execute(insert_statement, (10000,))
assert_one(cursor, "SELECT COUNT(*) FROM test", [10000])
cursor.execute(insert_statement, (10001,))
assert_one(cursor, "SELECT COUNT(*) FROM test", [10001])
execute_concurrent_with_args(cursor, insert_statement, [(i,) for i in range(10002, 15001)])
assert_one(cursor, "SELECT COUNT(*) FROM test", [15000])
@since('2.1')
def collection_indexing_test(self):
"""
@jira_ticket CASSANDRA-4511
"""
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
v int,
l list<int>,
s set<text>,
m map<text, int>,
PRIMARY KEY (k, v)
)
""")
cursor.execute("CREATE INDEX ON test(l)")
cursor.execute("CREATE INDEX ON test(s)")
cursor.execute("CREATE INDEX ON test(m)")
time.sleep(5.0)
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v, l, s, m) VALUES (0, 0, [1, 2], {'a'}, {'a' : 1})")
cursor.execute("INSERT INTO test (k, v, l, s, m) VALUES (0, 1, [3, 4], {'b', 'c'}, {'a' : 1, 'b' : 2})")
cursor.execute("INSERT INTO test (k, v, l, s, m) VALUES (0, 2, [1], {'a', 'c'}, {'c' : 3})")
cursor.execute("INSERT INTO test (k, v, l, s, m) VALUES (1, 0, [1, 2, 4], {}, {'b' : 1})")
cursor.execute("INSERT INTO test (k, v, l, s, m) VALUES (1, 1, [4, 5], {'d'}, {'a' : 1, 'b' : 3})")
# lists
assert_all(cursor, "SELECT k, v FROM test WHERE l CONTAINS 1", [[1, 0], [0, 0], [0, 2]])
assert_all(cursor, "SELECT k, v FROM test WHERE k = 0 AND l CONTAINS 1", [[0, 0], [0, 2]])
assert_all(cursor, "SELECT k, v FROM test WHERE l CONTAINS 2", [[1, 0], [0, 0]])
assert_none(cursor, "SELECT k, v FROM test WHERE l CONTAINS 6")
# sets
assert_all(cursor, "SELECT k, v FROM test WHERE s CONTAINS 'a'", [[0, 0], [0, 2]])
assert_all(cursor, "SELECT k, v FROM test WHERE k = 0 AND s CONTAINS 'a'", [[0, 0], [0, 2]])
assert_all(cursor, "SELECT k, v FROM test WHERE s CONTAINS 'd'", [[1, 1]])
assert_none(cursor, "SELECT k, v FROM test WHERE s CONTAINS 'e'")
# maps
assert_all(cursor, "SELECT k, v FROM test WHERE m CONTAINS 1", [[1, 0], [1, 1], [0, 0], [0, 1]])
assert_all(cursor, "SELECT k, v FROM test WHERE k = 0 AND m CONTAINS 1", [[0, 0], [0, 1]])
assert_all(cursor, "SELECT k, v FROM test WHERE m CONTAINS 2", [[0, 1]])
assert_none(cursor, "SELECT k, v FROM test WHERE m CONTAINS 4")
@since('2.1')
def map_keys_indexing_test(self):
"""
@jira_ticket CASSANDRA-6383
"""
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
v int,
m map<text, int>,
PRIMARY KEY (k, v)
)
""")
cursor.execute("CREATE INDEX ON test(keys(m))")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v, m) VALUES (0, 0, {'a' : 1})")
cursor.execute("INSERT INTO test (k, v, m) VALUES (0, 1, {'a' : 1, 'b' : 2})")
cursor.execute("INSERT INTO test (k, v, m) VALUES (0, 2, {'c' : 3})")
cursor.execute("INSERT INTO test (k, v, m) VALUES (1, 0, {'b' : 1})")
cursor.execute("INSERT INTO test (k, v, m) VALUES (1, 1, {'a' : 1, 'b' : 3})")
# maps
assert_all(cursor, "SELECT k, v FROM test WHERE m CONTAINS KEY 'a'", [[1, 1], [0, 0], [0, 1]])
assert_all(cursor, "SELECT k, v FROM test WHERE k = 0 AND m CONTAINS KEY 'a'", [[0, 0], [0, 1]])
assert_all(cursor, "SELECT k, v FROM test WHERE m CONTAINS KEY 'c'", [[0, 2]])
assert_none(cursor, "SELECT k, v FROM test WHERE m CONTAINS KEY 'd'")
def nan_infinity_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (f float PRIMARY KEY)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(f) VALUES (NaN)")
cursor.execute("INSERT INTO test(f) VALUES (-NaN)")
cursor.execute("INSERT INTO test(f) VALUES (Infinity)")
cursor.execute("INSERT INTO test(f) VALUES (-Infinity)")
selected = rows_to_list(cursor.execute("SELECT * FROM test"))
# selected should be [[nan], [inf], [-inf]],
# but assert element-wise because NaN != NaN
assert_length_equal(selected, 3)
assert_length_equal(selected[0], 1)
self.assertTrue(math.isnan(selected[0][0]))
self.assertEqual(selected[1], [float("inf")])
self.assertEqual(selected[2], [float("-inf")])
def static_columns_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
p int,
s int static,
v int,
PRIMARY KEY (k, p)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, s) VALUES (0, 42)")
assert_one(cursor, "SELECT * FROM test", [0, None, 42, None])
# Check that writetime works (#7081) -- we can't predict the exact value easily so
# we just check that it's non zero
row = cursor.execute("SELECT s, writetime(s) FROM test WHERE k=0")
self.assertTrue(list(row[0])[0] == 42 and list(row[0])[1] > 0)
cursor.execute("INSERT INTO test(k, p, s, v) VALUES (0, 0, 12, 0)")
cursor.execute("INSERT INTO test(k, p, s, v) VALUES (0, 1, 24, 1)")
# Check that the static column is indeed "static"
assert_all(cursor, "SELECT * FROM test", [[0, 0, 24, 0], [0, 1, 24, 1]])
# Check we do correctly get the static column value with a SELECT *, even
# if we're only slicing part of the partition
assert_one(cursor, "SELECT * FROM test WHERE k=0 AND p=0", [0, 0, 24, 0])
assert_one(cursor, "SELECT * FROM test WHERE k=0 AND p=0 ORDER BY p DESC", [0, 0, 24, 0])
assert_one(cursor, "SELECT * FROM test WHERE k=0 AND p=1", [0, 1, 24, 1])
assert_one(cursor, "SELECT * FROM test WHERE k=0 AND p=1 ORDER BY p DESC", [0, 1, 24, 1])
# Test for IN on the clustering key (#6769)
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND p IN (0, 1)", [[0, 0, 24, 0], [0, 1, 24, 1]])
# Check things still work if we don't select the static column. We also want
# this to not request the static columns internally at all, though that part
# requires debugging to assert
assert_one(cursor, "SELECT p, v FROM test WHERE k=0 AND p=1", [1, 1])
# Check that selecting only a static column with DISTINCT only yields one value
# (as we only query the static columns)
assert_one(cursor, "SELECT DISTINCT s FROM test WHERE k=0", [24])
# But without DISTINCT, we still get one result per row
assert_all(cursor, "SELECT s FROM test WHERE k=0", [[24], [24]])
# But querying other columns does correctly yield the full partition
assert_all(cursor, "SELECT s, v FROM test WHERE k=0", [[24, 0], [24, 1]])
assert_one(cursor, "SELECT s, v FROM test WHERE k=0 AND p=1", [24, 1])
assert_one(cursor, "SELECT p, s FROM test WHERE k=0 AND p=1", [1, 24])
assert_one(cursor, "SELECT k, p, s FROM test WHERE k=0 AND p=1", [0, 1, 24])
# Check that deleting a row doesn't implicitly delete the statics
cursor.execute("DELETE FROM test WHERE k=0 AND p=0")
assert_all(cursor, "SELECT * FROM test", [[0, 1, 24, 1]])
# But explicitly deleting the static column does remove it
cursor.execute("DELETE s FROM test WHERE k=0")
assert_all(cursor, "SELECT * FROM test", [[0, 1, None, 1]])
@since('2.1')
def static_columns_cas_test(self):
""""
@jira_ticket CASSANDRA-6839
@jira_ticket CASSANDRA-6561
"""
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
id int,
k text,
version int static,
v text,
PRIMARY KEY (id, k)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
# Test that INSERT IF NOT EXISTS concerns only the static column if neither clustering nor regular columns
# are provided, but concerns the CQL3 row targeted by the clustering columns otherwise
assert_one(cursor, "INSERT INTO test(id, k, v) VALUES (1, 'foo', 'foo') IF NOT EXISTS", [True])
assert_one(cursor, "INSERT INTO test(id, k, version) VALUES (1, 'foo', 1) IF NOT EXISTS", [False, 1, 'foo', None, 'foo'])
assert_one(cursor, "INSERT INTO test(id, version) VALUES (1, 1) IF NOT EXISTS", [True])
assert_one(cursor, "SELECT * FROM test", [1, 'foo', 1, 'foo'], ConsistencyLevel.SERIAL)
# Dodgy as it's not conditional, but this is not allowed with a condition and that's probably fine in practice so go with it
cursor.execute("DELETE FROM test WHERE id = 1")
assert_one(cursor, "INSERT INTO test(id, version) VALUES (0, 0) IF NOT EXISTS", [True])
assert_one(cursor, "UPDATE test SET v='foo', version=1 WHERE id=0 AND k='k1' IF version = 0", [True])
assert_all(cursor, "SELECT * FROM test", [[0, 'k1', 1, 'foo']], ConsistencyLevel.SERIAL)
assert_one(cursor, "UPDATE test SET v='bar', version=1 WHERE id=0 AND k='k2' IF version = 0", [False, 1])
assert_all(cursor, "SELECT * FROM test", [[0, 'k1', 1, 'foo']], ConsistencyLevel.SERIAL)
assert_one(cursor, "UPDATE test SET v='bar', version=2 WHERE id=0 AND k='k2' IF version = 1", [True])
assert_all(cursor, "SELECT * FROM test", [[0, 'k1', 2, 'foo'], [0, 'k2', 2, 'bar']], ConsistencyLevel.SERIAL)
# Testing batches
assert_one(cursor,
"""
BEGIN BATCH
UPDATE test SET v='foobar' WHERE id=0 AND k='k1';
UPDATE test SET v='barfoo' WHERE id=0 AND k='k2';
UPDATE test SET version=3 WHERE id=0 IF version=1;
APPLY BATCH
""", [False, 0, None, 2])
assert_one(cursor,
"""
BEGIN BATCH
UPDATE test SET v='foobar' WHERE id=0 AND k='k1';
UPDATE test SET v='barfoo' WHERE id=0 AND k='k2';
UPDATE test SET version=3 WHERE id=0 IF version=2;
APPLY BATCH
""", [True])
assert_all(cursor, "SELECT * FROM test", [[0, 'k1', 3, 'foobar'], [0, 'k2', 3, 'barfoo']], ConsistencyLevel.SERIAL)
assert_all(cursor,
"""
BEGIN BATCH
UPDATE test SET version=4 WHERE id=0 IF version=3;
UPDATE test SET v='row1' WHERE id=0 AND k='k1' IF v='foo';
UPDATE test SET v='row2' WHERE id=0 AND k='k2' IF v='bar';
APPLY BATCH
""", [[False, 0, 'k1', 3, 'foobar'], [False, 0, 'k2', 3, 'barfoo']])
assert_one(cursor,
"""
BEGIN BATCH
UPDATE test SET version=4 WHERE id=0 IF version=3;
UPDATE test SET v='row1' WHERE id=0 AND k='k1' IF v='foobar';
UPDATE test SET v='row2' WHERE id=0 AND k='k2' IF v='barfoo';
APPLY BATCH
""", [True])
assert_invalid(cursor,
"""
BEGIN BATCH
UPDATE test SET version=5 WHERE id=0 IF version=4;
UPDATE test SET v='row1' WHERE id=0 AND k='k1';
UPDATE test SET v='row2' WHERE id=1 AND k='k2';
APPLY BATCH
""")
assert_one(cursor,
"""
BEGIN BATCH
INSERT INTO TEST (id, k, v) VALUES(1, 'k1', 'val1') IF NOT EXISTS;
INSERT INTO TEST (id, k, v) VALUES(1, 'k2', 'val2') IF NOT EXISTS;
APPLY BATCH
""", [True])
assert_all(cursor, "SELECT * FROM test WHERE id=1", [[1, 'k1', None, 'val1'], [1, 'k2', None, 'val2']], ConsistencyLevel.SERIAL)
assert_one(cursor,
"""
BEGIN BATCH
INSERT INTO TEST (id, k, v) VALUES(1, 'k2', 'val2') IF NOT EXISTS;
INSERT INTO TEST (id, k, v) VALUES(1, 'k3', 'val3') IF NOT EXISTS;
APPLY BATCH
""", [False, 1, 'k2', None, 'val2'])
assert_one(cursor,
"""
BEGIN BATCH
UPDATE test SET v='newVal' WHERE id=1 AND k='k2' IF v='val0';
INSERT INTO TEST (id, k, v) VALUES(1, 'k3', 'val3') IF NOT EXISTS;
APPLY BATCH
""", [False, 1, 'k2', None, 'val2'])
assert_all(cursor, "SELECT * FROM test WHERE id=1", [[1, 'k1', None, 'val1'], [1, 'k2', None, 'val2']], ConsistencyLevel.SERIAL)
assert_one(cursor,
"""
BEGIN BATCH
UPDATE test SET v='newVal' WHERE id=1 AND k='k2' IF v='val2';
INSERT INTO TEST (id, k, v, version) VALUES(1, 'k3', 'val3', 1) IF NOT EXISTS;
APPLY BATCH
""", [True])
assert_all(cursor, "SELECT * FROM test WHERE id=1", [[1, 'k1', 1, 'val1'], [1, 'k2', 1, 'newVal'], [1, 'k3', 1, 'val3']], ConsistencyLevel.SERIAL)
assert_one(cursor,
"""
BEGIN BATCH
UPDATE test SET v='newVal1' WHERE id=1 AND k='k2' IF v='val2';
UPDATE test SET v='newVal2' WHERE id=1 AND k='k2' IF v='val3';
APPLY BATCH
""", [False, 1, 'k2', 'newVal'])
def static_columns_with_2i_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
p int,
s int static,
v int,
PRIMARY KEY (k, p)
)
""")
cursor.execute("CREATE INDEX ON test(v)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, p, s, v) VALUES (0, 0, 42, 1)")
cursor.execute("INSERT INTO test(k, p, v) VALUES (0, 1, 1)")
cursor.execute("INSERT INTO test(k, p, v) VALUES (0, 2, 2)")
assert_all(cursor, "SELECT * FROM test WHERE v = 1", [[0, 0, 42, 1], [0, 1, 42, 1]])
assert_all(cursor, "SELECT p, s FROM test WHERE v = 1", [[0, 42], [1, 42]])
assert_all(cursor, "SELECT p FROM test WHERE v = 1", [[0], [1]])
# We don't support that
assert_invalid(cursor, "SELECT s FROM test WHERE v = 1")
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12364',
flaky=True)
@since('2.1')
def static_columns_with_distinct_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
p int,
s int static,
PRIMARY KEY (k, p)
)
""")
# additional testing for CASSANDRA-8087
cursor.execute("""
CREATE TABLE test2 (
k int,
c1 int,
c2 int,
s1 int static,
s2 int static,
PRIMARY KEY (k, c1, c2)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("TRUNCATE test2")
cursor.execute("INSERT INTO test (k, p) VALUES (1, 1)")
cursor.execute("INSERT INTO test (k, p) VALUES (1, 2)")
assert_all(cursor, "SELECT k, s FROM test", [[1, None], [1, None]])
assert_one(cursor, "SELECT DISTINCT k, s FROM test", [1, None])
assert_one(cursor, "SELECT DISTINCT s FROM test WHERE k=1", [None])
assert_none(cursor, "SELECT DISTINCT s FROM test WHERE k=2")
cursor.execute("INSERT INTO test (k, p, s) VALUES (2, 1, 3)")
cursor.execute("INSERT INTO test (k, p) VALUES (2, 2)")
assert_all(cursor, "SELECT k, s FROM test", [[1, None], [1, None], [2, 3], [2, 3]])
assert_all(cursor, "SELECT DISTINCT k, s FROM test", [[1, None], [2, 3]])
assert_one(cursor, "SELECT DISTINCT s FROM test WHERE k=1", [None])
assert_one(cursor, "SELECT DISTINCT s FROM test WHERE k=2", [3])
assert_invalid(cursor, "SELECT DISTINCT s FROM test")
# paging to test for CASSANDRA-8108
cursor.execute("TRUNCATE test")
for i in range(10):
for j in range(10):
cursor.execute("INSERT INTO test (k, p, s) VALUES (%s, %s, %s)", (i, j, i))
cursor.default_fetch_size = 7
rows = list(cursor.execute("SELECT DISTINCT k, s FROM test"))
self.assertEqual(range(10), sorted([r[0] for r in rows]))
self.assertEqual(range(10), sorted([r[1] for r in rows]))
keys = ",".join(map(str, range(10)))
rows = list(cursor.execute("SELECT DISTINCT k, s FROM test WHERE k IN ({})".format(keys)))
self.assertEqual(range(10), [r[0] for r in rows])
self.assertEqual(range(10), [r[1] for r in rows])
# additional testing for CASSANDRA-8087
for i in range(10):
for j in range(5):
for k in range(5):
cursor.execute("INSERT INTO test2 (k, c1, c2, s1, s2) VALUES ({}, {}, {}, {}, {})".format(i, j, k, i, i + 1))
for fetch_size in (None, 2, 5, 7, 10, 24, 25, 26, 1000):
cursor.default_fetch_size = fetch_size
rows = list(cursor.execute("SELECT DISTINCT k, s1 FROM test2"))
self.assertEqual(range(10), sorted([r[0] for r in rows]))
self.assertEqual(range(10), sorted([r[1] for r in rows]))
rows = list(cursor.execute("SELECT DISTINCT k, s2 FROM test2"))
self.assertEqual(range(10), sorted([r[0] for r in rows]))
self.assertEqual(range(1, 11), sorted([r[1] for r in rows]))
rows = list(cursor.execute("SELECT DISTINCT k, s1 FROM test2 LIMIT 10"))
self.assertEqual(range(10), sorted([r[0] for r in rows]))
self.assertEqual(range(10), sorted([r[1] for r in rows]))
keys = ",".join(map(str, range(10)))
rows = list(cursor.execute("SELECT DISTINCT k, s1 FROM test2 WHERE k IN (%s)" % (keys,)))
self.assertEqual(range(10), [r[0] for r in rows])
self.assertEqual(range(10), [r[1] for r in rows])
keys = ",".join(map(str, range(10)))
rows = list(cursor.execute("SELECT DISTINCT k, s2 FROM test2 WHERE k IN (%s)" % (keys,)))
self.assertEqual(range(10), [r[0] for r in rows])
self.assertEqual(range(1, 11), [r[1] for r in rows])
keys = ",".join(map(str, range(10)))
rows = list(cursor.execute("SELECT DISTINCT k, s1 FROM test2 WHERE k IN (%s) LIMIT 10" % (keys,)))
self.assertEqual(range(10), sorted([r[0] for r in rows]))
self.assertEqual(range(10), sorted([r[1] for r in rows]))
def select_count_paging_test(self):
""" Test for the #6579 'select count' paging bug """
cursor = self.prepare()
cursor.execute("create table test(field1 text, field2 timeuuid, field3 boolean, primary key(field1, field2));")
cursor.execute("create index test_index on test(field3);")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("insert into test(field1, field2, field3) values ('hola', now(), false);")
cursor.execute("insert into test(field1, field2, field3) values ('hola', now(), false);")
# the result depends on which node we're connected to, see CASSANDRA-8216
if self.get_node_version(is_upgraded) >= '2.2':
# the coordinator is the upgraded 2.2+ node
assert_one(cursor, "select count(*) from test where field3 = false limit 1;", [2])
else:
# the coordinator is the non-upgraded 2.1 node
assert_one(cursor, "select count(*) from test where field3 = false limit 1;", [1])
def cas_and_ttl_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v int, lock boolean)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v, lock) VALUES (0, 0, false)")
cursor.execute("UPDATE test USING TTL 1 SET lock=true WHERE k=0")
time.sleep(2)
assert_one(cursor, "UPDATE test SET v = 1 WHERE k = 0 IF lock = null", [True])
def tuple_notation_test(self):
"""
Test the syntax introduced in CASSANDRA-4851
@jira_ticket CASSANDRA-4851
"""
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int, v1 int, v2 int, v3 int, PRIMARY KEY (k, v1, v2, v3))")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
for i in range(0, 2):
for j in range(0, 2):
for k in range(0, 2):
cursor.execute("INSERT INTO test(k, v1, v2, v3) VALUES (0, %d, %d, %d)" % (i, j, k))
assert_all(cursor, "SELECT v1, v2, v3 FROM test WHERE k = 0", [[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1]])
assert_all(cursor, "SELECT v1, v2, v3 FROM test WHERE k = 0 AND (v1, v2, v3) >= (1, 0, 1)", [[1, 0, 1], [1, 1, 0], [1, 1, 1]])
assert_all(cursor, "SELECT v1, v2, v3 FROM test WHERE k = 0 AND (v1, v2) >= (1, 1)", [[1, 1, 0], [1, 1, 1]])
assert_all(cursor, "SELECT v1, v2, v3 FROM test WHERE k = 0 AND (v1, v2) > (0, 1) AND (v1, v2, v3) <= (1, 1, 0)", [[1, 0, 0], [1, 0, 1], [1, 1, 0]])
assert_invalid(cursor, "SELECT v1, v2, v3 FROM test WHERE k = 0 AND (v1, v3) > (1, 0)")
@since('2.0', max_version='2.2.X')
def test_v2_protocol_IN_with_tuples(self):
"""
@jira_ticket CASSANDRA-8062
"""
for version in self.get_node_versions():
if version >= '3.0':
raise SkipTest('version {} not compatible with protocol version 2'.format(version))
cursor = self.prepare(protocol_version=2)
cursor.execute("CREATE TABLE test (k int, c1 int, c2 text, PRIMARY KEY (k, c1, c2))")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, c1, c2) VALUES (0, 0, 'a')")
cursor.execute("INSERT INTO test (k, c1, c2) VALUES (0, 0, 'b')")
cursor.execute("INSERT INTO test (k, c1, c2) VALUES (0, 0, 'c')")
p = cursor.prepare("SELECT * FROM test WHERE k=? AND (c1, c2) IN ?")
rows = list(cursor.execute(p, (0, [(0, 'b'), (0, 'c')])))
assert_length_equal(rows, 2)
self.assertEqual((0, 0, 'b'), rows[0])
self.assertEqual((0, 0, 'c'), rows[1])
def in_with_desc_order_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int, c1 int, c2 int, PRIMARY KEY (k, c1, c2))")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, c1, c2) VALUES (0, 0, 0)")
cursor.execute("INSERT INTO test(k, c1, c2) VALUES (0, 0, 1)")
cursor.execute("INSERT INTO test(k, c1, c2) VALUES (0, 0, 2)")
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (0, 2)", [[0, 0, 0], [0, 0, 2]])
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0)", [[0, 0, 0], [0, 0, 2]])
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0) ORDER BY c1 ASC", [[0, 0, 0], [0, 0, 2]])
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0) ORDER BY c1 DESC", [[0, 0, 2], [0, 0, 0]])
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (0, 2) ORDER BY c1 ASC", [[0, 0, 0], [0, 0, 2]])
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (0, 2) ORDER BY c1 DESC", [[0, 0, 2], [0, 0, 0]])
@since('2.1')
def in_order_by_without_selecting_test(self):
"""
Test that columns don't need to be selected for ORDER BY when there is an IN
@jira_ticket CASSANDRA-4911
"""
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int, c1 int, c2 int, v int, PRIMARY KEY (k, c1, c2))")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.default_fetch_size = None
cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (0, 0, 0, 0)")
cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (0, 0, 1, 1)")
cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (0, 0, 2, 2)")
cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (1, 1, 0, 3)")
cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (1, 1, 1, 4)")
cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (1, 1, 2, 5)")
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0)", [[0, 0, 0, 0], [0, 0, 2, 2]])
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0) ORDER BY c1 ASC, c2 ASC", [[0, 0, 0, 0], [0, 0, 2, 2]])
# check that we don't need to select the column on which we order
assert_all(cursor, "SELECT v FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0)", [[0], [2]])
assert_all(cursor, "SELECT v FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0) ORDER BY c1 ASC", [[0], [2]])
assert_all(cursor, "SELECT v FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0) ORDER BY c1 DESC", [[2], [0]])
if self.get_node_version(is_upgraded) >= '2.2':
# the coordinator is the upgraded 2.2+ node
assert_all(cursor, "SELECT v FROM test WHERE k IN (1, 0)", [[0], [1], [2], [3], [4], [5]])
else:
# the coordinator is the non-upgraded 2.1 node
assert_all(cursor, "SELECT v FROM test WHERE k IN (1, 0)", [[3], [4], [5], [0], [1], [2]])
assert_all(cursor, "SELECT v FROM test WHERE k IN (1, 0) ORDER BY c1 ASC", [[0], [1], [2], [3], [4], [5]])
# we should also be able to use functions in the select clause (additional test for CASSANDRA-8286)
results = list(cursor.execute("SELECT writetime(v) FROM test WHERE k IN (1, 0) ORDER BY c1 ASC"))
# since we don't know the write times, just assert that the order matches the order we expect
self.assertEqual(results, list(sorted(results)))
def cas_and_compact_test(self):
""" Test for CAS with compact storage table, and #6813 in particular """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE lock (
partition text,
key text,
owner text,
PRIMARY KEY (partition, key)
) WITH COMPACT STORAGE
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE lock")
cursor.execute("INSERT INTO lock(partition, key, owner) VALUES ('a', 'b', null)")
assert_one(cursor, "UPDATE lock SET owner='z' WHERE partition='a' AND key='b' IF owner=null", [True])
assert_one(cursor, "UPDATE lock SET owner='b' WHERE partition='a' AND key='b' IF owner='a'", [False, 'z'])
assert_one(cursor, "UPDATE lock SET owner='b' WHERE partition='a' AND key='b' IF owner='z'", [True])
assert_one(cursor, "INSERT INTO lock(partition, key, owner) VALUES ('a', 'c', 'x') IF NOT EXISTS", [True])
@known_failure(failure_source='cassandra',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12251',
flaky=True)
@since('2.1.1')
def whole_list_conditional_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE tlist (
k int PRIMARY KEY,
l list<text>
)""")
cursor.execute("""
CREATE TABLE frozentlist (
k int PRIMARY KEY,
l frozen<list<text>>
)""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE tlist")
cursor.execute("TRUNCATE frozentlist")
for frozen in (False, True):
table = "frozentlist" if frozen else "tlist"
cursor.execute("INSERT INTO {}(k, l) VALUES (0, ['foo', 'bar', 'foobar'])".format(table))
def check_applies(condition):
assert_one(cursor, "UPDATE {} SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF {}".format(table, condition), [True], cl=self.CL)
assert_one(cursor, "SELECT * FROM {}".format(table), [0, ['foo', 'bar', 'foobar']]) # read back at default cl.one
check_applies("l = ['foo', 'bar', 'foobar']")
check_applies("l != ['baz']")
check_applies("l > ['a']")
check_applies("l >= ['a']")
check_applies("l < ['z']")
check_applies("l <= ['z']")
check_applies("l IN (null, ['foo', 'bar', 'foobar'], ['a'])")
# multiple conditions
check_applies("l > ['aaa', 'bbb'] AND l > ['aaa']")
check_applies("l != null AND l IN (['foo', 'bar', 'foobar'])")
def check_does_not_apply(condition):
assert_one(cursor, "UPDATE {} SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF {}".format(table, condition),
[False, ['foo', 'bar', 'foobar']], cl=self.CL)
assert_one(cursor, "SELECT * FROM {}".format((table)), [0, ['foo', 'bar', 'foobar']]) # read back at default cl.one
# should not apply
check_does_not_apply("l = ['baz']")
check_does_not_apply("l != ['foo', 'bar', 'foobar']")
check_does_not_apply("l > ['z']")
check_does_not_apply("l >= ['z']")
check_does_not_apply("l < ['a']")
check_does_not_apply("l <= ['a']")
check_does_not_apply("l IN (['a'], null)")
check_does_not_apply("l IN ()")
# multiple conditions
check_does_not_apply("l IN () AND l IN (['foo', 'bar', 'foobar'])")
check_does_not_apply("l > ['zzz'] AND l < ['zzz']")
def check_invalid(condition, expected=InvalidRequest):
assert_invalid(cursor, "UPDATE {} SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF {}".format(table, condition), expected=expected)
assert_one(cursor, "SELECT * FROM {}".format(table), [0, ['foo', 'bar', 'foobar']], cl=self.CL)
check_invalid("l = [null]")
check_invalid("l < null")
check_invalid("l <= null")
check_invalid("l > null")
check_invalid("l >= null")
check_invalid("l IN null", expected=SyntaxException)
check_invalid("l IN 367", expected=SyntaxException)
check_invalid("l CONTAINS KEY 123", expected=SyntaxException)
# not supported yet
check_invalid("m CONTAINS 'bar'", expected=SyntaxException)
@since('2.1')
def list_item_conditional_test(self):
# Lists
cursor = self.prepare()
cursor.execute("""
CREATE TABLE tlist (
k int PRIMARY KEY,
l list<text>
)""")
cursor.execute("""
CREATE TABLE frozentlist (
k int PRIMARY KEY,
l frozen<list<text>>
)""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE tlist")
cursor.execute("TRUNCATE frozentlist")
for frozen in (False, True):
table = "frozentlist" if frozen else "tlist"
assert_one(cursor, "INSERT INTO %s(k, l) VALUES (0, ['foo', 'bar', 'foobar']) IF NOT EXISTS" % (table,), [True])
assert_invalid(cursor, "DELETE FROM %s WHERE k=0 IF l[null] = 'foobar'" % (table,))
assert_invalid(cursor, "DELETE FROM %s WHERE k=0 IF l[-2] = 'foobar'" % (table,))
assert_one(cursor, "DELETE FROM %s WHERE k=0 IF l[1] = null" % (table,), [False, ['foo', 'bar', 'foobar']])
assert_one(cursor, "DELETE FROM %s WHERE k=0 IF l[1] = 'foobar'" % (table,), [False, ['foo', 'bar', 'foobar']])
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, ['foo', 'bar', 'foobar']], cl=ConsistencyLevel.SERIAL)
assert_one(cursor, "DELETE FROM %s WHERE k=0 IF l[1] = 'bar'" % (table,), [True])
assert_none(cursor, "SELECT * FROM %s" % (table,), cl=ConsistencyLevel.SERIAL)
@since('2.1.1')
def expanded_list_item_conditional_test(self):
# expanded functionality from CASSANDRA-6839
cursor = self.prepare()
cursor.execute("""
CREATE TABLE tlist (
k int PRIMARY KEY,
l list<text>
)""")
cursor.execute("""
CREATE TABLE frozentlist (
k int PRIMARY KEY,
l frozen<list<text>>
)""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE tlist")
cursor.execute("TRUNCATE frozentlist")
for frozen in (False, True):
table = "frozentlist" if frozen else "tlist"
cursor.execute("INSERT INTO %s(k, l) VALUES (0, ['foo', 'bar', 'foobar'])" % (table,))
def check_applies(condition):
assert_one(cursor, "UPDATE %s SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF %s" % (table, condition), [True])
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, ['foo', 'bar', 'foobar']])
check_applies("l[1] < 'zzz'")
check_applies("l[1] <= 'bar'")
check_applies("l[1] > 'aaa'")
check_applies("l[1] >= 'bar'")
check_applies("l[1] != 'xxx'")
check_applies("l[1] != null")
check_applies("l[1] IN (null, 'xxx', 'bar')")
check_applies("l[1] > 'aaa' AND l[1] < 'zzz'")
# check beyond end of list
check_applies("l[3] = null")
check_applies("l[3] IN (null, 'xxx', 'bar')")
def check_does_not_apply(condition):
assert_one(cursor, "UPDATE %s SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF %s" % (table, condition), [False, ['foo', 'bar', 'foobar']])
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, ['foo', 'bar', 'foobar']])
check_does_not_apply("l[1] < 'aaa'")
check_does_not_apply("l[1] <= 'aaa'")
check_does_not_apply("l[1] > 'zzz'")
check_does_not_apply("l[1] >= 'zzz'")
check_does_not_apply("l[1] != 'bar'")
check_does_not_apply("l[1] IN (null, 'xxx')")
check_does_not_apply("l[1] IN ()")
check_does_not_apply("l[1] != null AND l[1] IN ()")
# check beyond end of list
check_does_not_apply("l[3] != null")
check_does_not_apply("l[3] = 'xxx'")
def check_invalid(condition, expected=InvalidRequest):
assert_invalid(cursor, "UPDATE %s SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF %s" % (table, condition), expected=expected)
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, ['foo', 'bar', 'foobar']])
check_invalid("l[1] < null")
check_invalid("l[1] <= null")
check_invalid("l[1] > null")
check_invalid("l[1] >= null")
check_invalid("l[1] IN null", expected=SyntaxException)
check_invalid("l[1] IN 367", expected=SyntaxException)
check_invalid("l[1] IN (1, 2, 3)")
check_invalid("l[1] CONTAINS 367", expected=SyntaxException)
check_invalid("l[1] CONTAINS KEY 367", expected=SyntaxException)
check_invalid("l[null] = null")
@since('2.1.1')
def whole_set_conditional_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE tset (
k int PRIMARY KEY,
s set<text>
)""")
cursor.execute("""
CREATE TABLE frozentset (
k int PRIMARY KEY,
s frozen<set<text>>
)""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE tset")
cursor.execute("TRUNCATE frozentset")
for frozen in (False, True):
table = "frozentset" if frozen else "tset"
assert_one(cursor, "INSERT INTO %s(k, s) VALUES (0, {'bar', 'foo'}) IF NOT EXISTS" % (table,), [True])
def check_applies(condition):
assert_one(cursor, "UPDATE %s SET s = {'bar', 'foo'} WHERE k=0 IF %s" % (table, condition), [True])
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, set(['bar', 'foo'])], cl=ConsistencyLevel.SERIAL)
check_applies("s = {'bar', 'foo'}")
check_applies("s = {'foo', 'bar'}")
check_applies("s != {'baz'}")
check_applies("s > {'a'}")
check_applies("s >= {'a'}")
check_applies("s < {'z'}")
check_applies("s <= {'z'}")
check_applies("s IN (null, {'bar', 'foo'}, {'a'})")
# multiple conditions
check_applies("s > {'a'} AND s < {'z'}")
check_applies("s IN (null, {'bar', 'foo'}, {'a'}) AND s IN ({'a'}, {'bar', 'foo'}, null)")
def check_does_not_apply(condition):
assert_one(cursor, "UPDATE %s SET s = {'bar', 'foo'} WHERE k=0 IF %s" % (table, condition),
[False, {'bar', 'foo'}])
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, {'bar', 'foo'}], cl=ConsistencyLevel.SERIAL)
# should not apply
check_does_not_apply("s = {'baz'}")
check_does_not_apply("s != {'bar', 'foo'}")
check_does_not_apply("s > {'z'}")
check_does_not_apply("s >= {'z'}")
check_does_not_apply("s < {'a'}")
check_does_not_apply("s <= {'a'}")
check_does_not_apply("s IN ({'a'}, null)")
check_does_not_apply("s IN ()")
check_does_not_apply("s != null AND s IN ()")
def check_invalid(condition, expected=InvalidRequest):
assert_invalid(cursor, "UPDATE %s SET s = {'bar', 'foo'} WHERE k=0 IF %s" % (table, condition), expected=expected)
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, {'bar', 'foo'}], cl=ConsistencyLevel.SERIAL)
check_invalid("s = {null}")
check_invalid("s < null")
check_invalid("s <= null")
check_invalid("s > null")
check_invalid("s >= null")
check_invalid("s IN null", expected=SyntaxException)
check_invalid("s IN 367", expected=SyntaxException)
check_invalid("s CONTAINS KEY 123", expected=SyntaxException)
# element access is not allow for sets
check_invalid("s['foo'] = 'foobar'")
# not supported yet
check_invalid("m CONTAINS 'bar'", expected=SyntaxException)
@since('2.1.1')
def whole_map_conditional_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE tmap (
k int PRIMARY KEY,
m map<text, text>
)""")
cursor.execute("""
CREATE TABLE frozentmap (
k int PRIMARY KEY,
m frozen<map<text, text>>
)""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE tmap")
cursor.execute("TRUNCATE frozentmap")
for frozen in (False, True):
debug("Testing {} maps".format("frozen" if frozen else "normal"))
table = "frozentmap" if frozen else "tmap"
cursor.execute("INSERT INTO %s(k, m) VALUES (0, {'foo' : 'bar'})" % (table,))
def check_applies(condition):
assert_one(cursor, "UPDATE %s SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (table, condition), [True])
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, {'foo': 'bar'}], cl=ConsistencyLevel.SERIAL)
check_applies("m = {'foo': 'bar'}")
check_applies("m > {'a': 'a'}")
check_applies("m >= {'a': 'a'}")
check_applies("m < {'z': 'z'}")
check_applies("m <= {'z': 'z'}")
check_applies("m != {'a': 'a'}")
check_applies("m IN (null, {'a': 'a'}, {'foo': 'bar'})")
# multiple conditions
check_applies("m > {'a': 'a'} AND m < {'z': 'z'}")
check_applies("m != null AND m IN (null, {'a': 'a'}, {'foo': 'bar'})")
def check_does_not_apply(condition):
assert_one(cursor, "UPDATE %s SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (table, condition), [False, {'foo': 'bar'}])
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, {'foo': 'bar'}], cl=ConsistencyLevel.SERIAL)
# should not apply
check_does_not_apply("m = {'a': 'a'}")
check_does_not_apply("m > {'z': 'z'}")
check_does_not_apply("m >= {'z': 'z'}")
check_does_not_apply("m < {'a': 'a'}")
check_does_not_apply("m <= {'a': 'a'}")
check_does_not_apply("m != {'foo': 'bar'}")
check_does_not_apply("m IN ({'a': 'a'}, null)")
check_does_not_apply("m IN ()")
check_does_not_apply("m = null AND m != null")
def check_invalid(condition, expected=InvalidRequest):
assert_invalid(cursor, "UPDATE %s SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (table, condition), expected=expected)
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, {'foo': 'bar'}], cl=ConsistencyLevel.SERIAL)
check_invalid("m = {null: null}")
check_invalid("m = {'a': null}")
check_invalid("m = {null: 'a'}")
check_invalid("m < null")
check_invalid("m IN null", expected=SyntaxException)
# not supported yet
check_invalid("m CONTAINS 'bar'", expected=SyntaxException)
check_invalid("m CONTAINS KEY 'foo'", expected=SyntaxException)
check_invalid("m CONTAINS null", expected=SyntaxException)
check_invalid("m CONTAINS KEY null", expected=SyntaxException)
@since('2.1')
def map_item_conditional_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE tmap (
k int PRIMARY KEY,
m map<text, text>
)""")
cursor.execute("""
CREATE TABLE frozentmap (
k int PRIMARY KEY,
m frozen<map<text, text>>
)""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE tmap")
cursor.execute("TRUNCATE frozentmap")
for frozen in (False, True):
table = "frozentmap" if frozen else "tmap"
assert_one(cursor, "INSERT INTO %s(k, m) VALUES (0, {'foo' : 'bar'}) IF NOT EXISTS" % (table,), [True])
assert_invalid(cursor, "DELETE FROM %s WHERE k=0 IF m[null] = 'foo'" % (table,))
assert_one(cursor, "DELETE FROM %s WHERE k=0 IF m['foo'] = 'foo'" % (table,), [False, {'foo': 'bar'}])
assert_one(cursor, "DELETE FROM %s WHERE k=0 IF m['foo'] = null" % (table,), [False, {'foo': 'bar'}])
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, {'foo': 'bar'}], cl=ConsistencyLevel.SERIAL)
assert_one(cursor, "DELETE FROM %s WHERE k=0 IF m['foo'] = 'bar'" % (table,), [True])
assert_none(cursor, "SELECT * FROM %s" % (table,), cl=ConsistencyLevel.SERIAL)
if self.get_version() > "2.1.1":
cursor.execute("INSERT INTO %s(k, m) VALUES (1, null)" % (table,))
if frozen:
assert_invalid(cursor, "UPDATE %s set m['foo'] = 'bar', m['bar'] = 'foo' WHERE k = 1 IF m['foo'] IN ('blah', null)" % (table,))
else:
assert_one(cursor, "UPDATE %s set m['foo'] = 'bar', m['bar'] = 'foo' WHERE k = 1 IF m['foo'] IN ('blah', null)" % (table,), [True])
@since('2.1.1')
def expanded_map_item_conditional_test(self):
# expanded functionality from CASSANDRA-6839
cursor = self.prepare()
cursor.execute("""
CREATE TABLE tmap (
k int PRIMARY KEY,
m map<text, text>
)""")
cursor.execute("""
CREATE TABLE frozentmap (
k int PRIMARY KEY,
m frozen<map<text, text>>
)""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE tmap")
cursor.execute("TRUNCATE frozentmap")
for frozen in (False, True):
debug("Testing {} maps".format("frozen" if frozen else "normal"))
table = "frozentmap" if frozen else "tmap"
cursor.execute("INSERT INTO %s (k, m) VALUES (0, {'foo' : 'bar'})" % table)
def check_applies(condition):
assert_one(cursor, "UPDATE %s SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (table, condition), [True])
assert_one(cursor, "SELECT * FROM {}".format(table), [0, {'foo': 'bar'}], cl=ConsistencyLevel.SERIAL)
check_applies("m['xxx'] = null")
check_applies("m['foo'] < 'zzz'")
check_applies("m['foo'] <= 'bar'")
check_applies("m['foo'] > 'aaa'")
check_applies("m['foo'] >= 'bar'")
check_applies("m['foo'] != 'xxx'")
check_applies("m['foo'] != null")
check_applies("m['foo'] IN (null, 'xxx', 'bar')")
check_applies("m['xxx'] IN (null, 'xxx', 'bar')") # m['xxx'] is not set
# multiple conditions
check_applies("m['foo'] < 'zzz' AND m['foo'] > 'aaa'")
def check_does_not_apply(condition):
assert_one(cursor, "UPDATE %s SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (table, condition), [False, {'foo': 'bar'}])
assert_one(cursor, "SELECT * FROM {}".format(table), [0, {'foo': 'bar'}], cl=ConsistencyLevel.SERIAL)
check_does_not_apply("m['foo'] < 'aaa'")
check_does_not_apply("m['foo'] <= 'aaa'")
check_does_not_apply("m['foo'] > 'zzz'")
check_does_not_apply("m['foo'] >= 'zzz'")
check_does_not_apply("m['foo'] != 'bar'")
check_does_not_apply("m['xxx'] != null") # m['xxx'] is not set
check_does_not_apply("m['foo'] IN (null, 'xxx')")
check_does_not_apply("m['foo'] IN ()")
check_does_not_apply("m['foo'] != null AND m['foo'] = null")
def check_invalid(condition, expected=InvalidRequest):
assert_invalid(cursor, "UPDATE %s SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (table, condition), expected=expected)
assert_one(cursor, "SELECT * FROM {}".format(table), [0, {'foo': 'bar'}])
check_invalid("m['foo'] < null")
check_invalid("m['foo'] <= null")
check_invalid("m['foo'] > null")
check_invalid("m['foo'] >= null")
check_invalid("m['foo'] IN null", expected=SyntaxException)
check_invalid("m['foo'] IN 367", expected=SyntaxException)
check_invalid("m['foo'] IN (1, 2, 3)")
check_invalid("m['foo'] CONTAINS 367", expected=SyntaxException)
check_invalid("m['foo'] CONTAINS KEY 367", expected=SyntaxException)
check_invalid("m[null] = null")
@since("2.1.1")
def cas_and_list_index_test(self):
""" Test for 7499 test """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v text,
l list<text>
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, v, l) VALUES(0, 'foobar', ['foi', 'bar'])")
assert_one(cursor, "UPDATE test SET l[0] = 'foo' WHERE k = 0 IF v = 'barfoo'", [False, 'foobar'])
assert_one(cursor, "UPDATE test SET l[0] = 'foo' WHERE k = 0 IF v = 'foobar'", [True])
# since the last write was an LWT update (SERIAL), we need to read back at SERIAL (or higher)
assert_one(cursor, "SELECT * FROM test", [0, ['foo', 'bar'], 'foobar'], cl=ConsistencyLevel.SERIAL)
@since("2.0")
def static_with_limit_test(self):
""" Test LIMIT when static columns are present (#6956) """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
s int static,
v int,
PRIMARY KEY (k, v)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, s) VALUES(0, 42)")
for i in range(0, 4):
cursor.execute("INSERT INTO test(k, v) VALUES(0, {})".format(i))
assert_one(cursor, "SELECT * FROM test WHERE k = 0 LIMIT 1", [0, 0, 42])
assert_all(cursor, "SELECT * FROM test WHERE k = 0 LIMIT 2", [[0, 0, 42], [0, 1, 42]])
assert_all(cursor, "SELECT * FROM test WHERE k = 0 LIMIT 3", [[0, 0, 42], [0, 1, 42], [0, 2, 42]])
@since("2.0")
def static_with_empty_clustering_test(self):
""" Test for bug of #7455 """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test(
pkey text,
ckey text,
value text,
static_value text static,
PRIMARY KEY(pkey, ckey)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(pkey, static_value) VALUES ('partition1', 'static value')")
cursor.execute("INSERT INTO test(pkey, ckey, value) VALUES('partition1', '', 'value')")
assert_one(cursor, "SELECT * FROM test", ['partition1', '', 'static value', 'value'])
@since("1.2")
def limit_compact_table_test(self):
""" Check for #7052 bug """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
v int,
PRIMARY KEY (k, v)
) WITH COMPACT STORAGE
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
for i in range(0, 4):
for j in range(0, 4):
cursor.execute("INSERT INTO test(k, v) VALUES (%d, %d)" % (i, j))
assert_all(cursor, "SELECT v FROM test WHERE k=0 AND v > 0 AND v <= 4 LIMIT 2", [[1], [2]])
assert_all(cursor, "SELECT v FROM test WHERE k=0 AND v > -1 AND v <= 4 LIMIT 2", [[0], [1]])
assert_all(cursor, "SELECT * FROM test WHERE k IN (0, 1, 2) AND v > 0 AND v <= 4 LIMIT 2", [[0, 1], [0, 2]])
assert_all(cursor, "SELECT * FROM test WHERE k IN (0, 1, 2) AND v > -1 AND v <= 4 LIMIT 2", [[0, 0], [0, 1]])
assert_all(cursor, "SELECT * FROM test WHERE k IN (0, 1, 2) AND v > 0 AND v <= 4 LIMIT 6", [[0, 1], [0, 2], [0, 3], [1, 1], [1, 2], [1, 3]])
# This doesn't work -- see #7059
# assert_all(cursor, "SELECT * FROM test WHERE v > 1 AND v <= 3 LIMIT 6 ALLOW FILTERING", [[1, 2], [1, 3], [0, 2], [0, 3], [2, 2], [2, 3]])
def key_index_with_reverse_clustering_test(self):
""" Test for #6950 bug """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k1 int,
k2 int,
v int,
PRIMARY KEY ((k1, k2), v)
) WITH CLUSTERING ORDER BY (v DESC)
""")
cursor.execute("CREATE INDEX ON test(k2)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (0, 0, 1)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (0, 1, 2)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (0, 0, 3)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (1, 0, 4)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (1, 1, 5)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (2, 0, 7)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (2, 1, 8)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (3, 0, 1)")
assert_all(cursor, "SELECT * FROM test WHERE k2 = 0 AND v >= 2 ALLOW FILTERING", [[2, 0, 7], [0, 0, 3], [1, 0, 4]])
@since('2.1')
def invalid_custom_timestamp_test(self):
"""
@jira_ticket CASSANDRA-7067
"""
cursor = self.prepare()
# Conditional updates
cursor.execute("CREATE TABLE test (k int, v int, PRIMARY KEY (k, v))")
# Counters
cursor.execute("CREATE TABLE counters (k int PRIMARY KEY, c counter)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("TRUNCATE counters")
cursor.execute("BEGIN BATCH INSERT INTO test(k, v) VALUES(0, 0) IF NOT EXISTS; INSERT INTO test(k, v) VALUES(0, 1) IF NOT EXISTS; APPLY BATCH")
assert_invalid(cursor, "BEGIN BATCH INSERT INTO test(k, v) VALUES(0, 2) IF NOT EXISTS USING TIMESTAMP 1; INSERT INTO test(k, v) VALUES(0, 3) IF NOT EXISTS; APPLY BATCH")
assert_invalid(cursor, "BEGIN BATCH USING TIMESTAMP 1 INSERT INTO test(k, v) VALUES(0, 4) IF NOT EXISTS; INSERT INTO test(k, v) VALUES(0, 1) IF NOT EXISTS; APPLY BATCH")
cursor.execute("INSERT INTO test(k, v) VALUES(1, 0) IF NOT EXISTS")
assert_invalid(cursor, "INSERT INTO test(k, v) VALUES(1, 1) IF NOT EXISTS USING TIMESTAMP 5")
# counters
cursor.execute("UPDATE counters SET c = c + 1 WHERE k = 0")
assert_invalid(cursor, "UPDATE counters USING TIMESTAMP 10 SET c = c + 1 WHERE k = 0")
cursor.execute("BEGIN COUNTER BATCH UPDATE counters SET c = c + 1 WHERE k = 0; UPDATE counters SET c = c + 1 WHERE k = 0; APPLY BATCH")
assert_invalid(cursor, "BEGIN COUNTER BATCH UPDATE counters USING TIMESTAMP 3 SET c = c + 1 WHERE k = 0; UPDATE counters SET c = c + 1 WHERE k = 0; APPLY BATCH")
assert_invalid(cursor, "BEGIN COUNTER BATCH USING TIMESTAMP 3 UPDATE counters SET c = c + 1 WHERE k = 0; UPDATE counters SET c = c + 1 WHERE k = 0; APPLY BATCH")
def clustering_order_in_test(self):
"""Test for #7105 bug"""
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
a int,
b int,
c int,
PRIMARY KEY ((a, b), c)
) with clustering order by (c desc)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (a, b, c) VALUES (1, 2, 3)")
cursor.execute("INSERT INTO test (a, b, c) VALUES (4, 5, 6)")
assert_one(cursor, "SELECT * FROM test WHERE a=1 AND b=2 AND c IN (3)", [1, 2, 3])
assert_one(cursor, "SELECT * FROM test WHERE a=1 AND b=2 AND c IN (3, 4)", [1, 2, 3])
def bug7105_test(self):
"""Test for #7105 bug"""
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
a int,
b int,
c int,
d int,
PRIMARY KEY (a, b)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (a, b, c, d) VALUES (1, 2, 3, 3)")
cursor.execute("INSERT INTO test (a, b, c, d) VALUES (1, 4, 6, 5)")
assert_one(cursor, "SELECT * FROM test WHERE a=1 AND b=2 ORDER BY b DESC", [1, 2, 3, 3])
def bug_6612_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE session_data (
username text,
session_id text,
app_name text,
account text,
last_access timestamp,
created_on timestamp,
PRIMARY KEY (username, session_id, app_name, account)
);
""")
# cursor.execute("create index sessionIndex ON session_data (session_id)")
cursor.execute("create index sessionAppName ON session_data (app_name)")
cursor.execute("create index lastAccessIndex ON session_data (last_access)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE session_data")
assert_one(cursor, "select count(*) from session_data where app_name='foo' and account='bar' and last_access > 4 allow filtering", [0])
cursor.execute("insert into session_data (username, session_id, app_name, account, last_access, created_on) values ('toto', 'foo', 'foo', 'bar', 12, 13)")
assert_one(cursor, "select count(*) from session_data where app_name='foo' and account='bar' and last_access > 4 allow filtering", [1])
def blobAs_functions_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v int
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
# A blob that is not 4 bytes should be rejected
assert_invalid(cursor, "INSERT INTO test(k, v) VALUES (0, blobAsInt(0x01))")
def invalid_string_literals_test(self):
""" Test for CASSANDRA-8101 """
cursor = self.prepare()
cursor.execute("create table invalid_string_literals (k int primary key, a ascii, b text)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE invalid_string_literals")
assert_invalid(cursor, u"insert into ks.invalid_string_literals (k, a) VALUES (0, '\u038E\u0394\u03B4\u03E0')")
# since the protocol requires strings to be valid UTF-8, the error response to this is a ProtocolError
try:
cursor.execute("insert into ks.invalid_string_literals (k, c) VALUES (0, '\xc2\x01')")
self.fail("Expected error")
except ProtocolException as e:
self.assertTrue("Cannot decode string as UTF8" in str(e))
def negative_timestamp_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v int)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES (1, 1) USING TIMESTAMP -42")
assert_one(cursor, "SELECT writetime(v) FROM TEST WHERE k = 1", [-42])
@since('2.2')
@require("7396")
def select_map_key_single_row_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v map<int, text>)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES ( 0, {1:'a', 2:'b', 3:'c', 4:'d'})")
assert_one(cursor, "SELECT v[1] FROM test WHERE k = 0", ['a'])
assert_one(cursor, "SELECT v[5] FROM test WHERE k = 0", [])
assert_one(cursor, "SELECT v[1] FROM test WHERE k = 1", [])
assert_one(cursor, "SELECT v[1..3] FROM test WHERE k = 0", ['a', 'b', 'c'])
assert_one(cursor, "SELECT v[3..5] FROM test WHERE k = 0", ['c', 'd'])
assert_invalid(cursor, "SELECT v[3..1] FROM test WHERE k = 0")
assert_one(cursor, "SELECT v[..2] FROM test WHERE k = 0", ['a', 'b'])
assert_one(cursor, "SELECT v[3..] FROM test WHERE k = 0", ['c', 'd'])
assert_one(cursor, "SELECT v[0..] FROM test WHERE k = 0", ['a', 'b', 'c', 'd'])
assert_one(cursor, "SELECT v[..5] FROM test WHERE k = 0", ['a', 'b', 'c', 'd'])
assert_one(cursor, "SELECT sizeof(v) FROM test where k = 0", [4])
@since('2.2')
@require("7396")
def select_set_key_single_row_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v set<text>)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES ( 0, {'e', 'a', 'd', 'b'})")
assert_one(cursor, "SELECT v FROM test WHERE k = 0", [sortedset(['a', 'b', 'd', 'e'])])
assert_one(cursor, "SELECT v['a'] FROM test WHERE k = 0", [True])
assert_one(cursor, "SELECT v['c'] FROM test WHERE k = 0", [False])
assert_one(cursor, "SELECT v['a'] FROM test WHERE k = 1", [])
assert_one(cursor, "SELECT v['b'..'d'] FROM test WHERE k = 0", ['b', 'd'])
assert_one(cursor, "SELECT v['b'..'e'] FROM test WHERE k = 0", ['b', 'd', 'e'])
assert_one(cursor, "SELECT v['a'..'d'] FROM test WHERE k = 0", ['a', 'b', 'd'])
assert_one(cursor, "SELECT v['b'..'f'] FROM test WHERE k = 0", ['b', 'd', 'e'])
assert_invalid(cursor, "SELECT v['d'..'a'] FROM test WHERE k = 0")
assert_one(cursor, "SELECT v['d'..] FROM test WHERE k = 0", ['d', 'e'])
assert_one(cursor, "SELECT v[..'d'] FROM test WHERE k = 0", ['a', 'b', 'd'])
assert_one(cursor, "SELECT v['f'..] FROM test WHERE k = 0", [])
assert_one(cursor, "SELECT v[..'f'] FROM test WHERE k = 0", ['a', 'b', 'd', 'e'])
assert_one(cursor, "SELECT sizeof(v) FROM test where k = 0", [4])
@since('2.2')
@require("7396")
def select_list_key_single_row_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v list<text>)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES ( 0, ['e', 'a', 'd', 'b'])")
assert_one(cursor, "SELECT v FROM test WHERE k = 0", [['e', 'a', 'd', 'b']])
assert_one(cursor, "SELECT v[0] FROM test WHERE k = 0", ['e'])
assert_one(cursor, "SELECT v[3] FROM test WHERE k = 0", ['b'])
assert_one(cursor, "SELECT v[0] FROM test WHERE k = 1", [])
assert_invalid(cursor, "SELECT v[-1] FROM test WHERE k = 0")
assert_invalid(cursor, "SELECT v[5] FROM test WHERE k = 0")
assert_one(cursor, "SELECT v[1..3] FROM test WHERE k = 0", ['a', 'd', 'b'])
assert_one(cursor, "SELECT v[0..2] FROM test WHERE k = 0", ['e', 'a', 'd'])
assert_invalid(cursor, "SELECT v[0..4] FROM test WHERE k = 0")
assert_invalid(cursor, "SELECT v[2..0] FROM test WHERE k = 0")
assert_one(cursor, "SELECT sizeof(v) FROM test where k = 0", [4])
@since('2.2')
@require("7396")
def select_map_key_multi_row_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v map<int, text>)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES ( 0, {1:'a', 2:'b', 3:'c', 4:'d'})")
cursor.execute("INSERT INTO test (k, v) VALUES ( 1, {1:'a', 2:'b', 5:'e', 6:'f'})")
assert_all(cursor, "SELECT v[1] FROM test", [['a'], ['a']])
assert_all(cursor, "SELECT v[5] FROM test", [[], ['e']])
assert_all(cursor, "SELECT v[4] FROM test", [['d'], []])
assert_all(cursor, "SELECT v[1..3] FROM test", [['a', 'b', 'c'], ['a', 'b', 'e']])
assert_all(cursor, "SELECT v[3..5] FROM test", [['c', 'd'], ['e']])
assert_invalid(cursor, "SELECT v[3..1] FROM test")
assert_all(cursor, "SELECT v[..2] FROM test", [['a', 'b'], ['a', 'b']])
assert_all(cursor, "SELECT v[3..] FROM test", [['c', 'd'], ['e', 'f']])
assert_all(cursor, "SELECT v[0..] FROM test", [['a', 'b', 'c', 'd'], ['a', 'b', 'e', 'f']])
assert_all(cursor, "SELECT v[..5] FROM test", [['a', 'b', 'c', 'd'], ['a', 'b', 'e']])
assert_all(cursor, "SELECT sizeof(v) FROM test", [[4], [4]])
@since('2.2')
@require("7396")
def select_set_key_multi_row_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v set<text>)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES ( 0, {'e', 'a', 'd', 'b'})")
cursor.execute("INSERT INTO test (k, v) VALUES ( 1, {'c', 'f', 'd', 'b'})")
assert_all(cursor, "SELECT v FROM test", [[sortedset(['b', 'c', 'd', 'f'])], [sortedset(['a', 'b', 'd', 'e'])]])
assert_all(cursor, "SELECT v['a'] FROM test", [[True], [False]])
assert_all(cursor, "SELECT v['c'] FROM test", [[False], [True]])
assert_all(cursor, "SELECT v['b'..'d'] FROM test", [['b', 'd'], ['b', 'c', 'd']])
assert_all(cursor, "SELECT v['b'..'e'] FROM test", [['b', 'd', 'e'], ['b', 'c', 'd']])
assert_all(cursor, "SELECT v['a'..'d'] FROM test", [['a', 'b', 'd'], ['b', 'c', 'd']])
assert_all(cursor, "SELECT v['b'..'f'] FROM test", [['b', 'd', 'e'], ['b', 'c', 'd', 'f']])
assert_invalid(cursor, "SELECT v['d'..'a'] FROM test")
assert_all(cursor, "SELECT v['d'..] FROM test", [['d', 'e'], ['d', 'f']])
assert_all(cursor, "SELECT v[..'d'] FROM test", [['a', 'b', 'd'], ['b', 'c', 'd']])
assert_all(cursor, "SELECT v['f'..] FROM test", [[], ['f']])
assert_all(cursor, "SELECT v[..'f'] FROM test", [['a', 'b', 'd', 'e'], ['b', 'c', 'd', 'f']])
assert_all(cursor, "SELECT sizeof(v) FROM test", [[4], [4]])
@since('2.2')
@require("7396")
def select_list_key_multi_row_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v list<text>)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES ( 0, ['e', 'a', 'd', 'b'])")
cursor.execute("INSERT INTO test (k, v) VALUES ( 1, ['c', 'f', 'd', 'b'])")
assert_all(cursor, "SELECT v FROM test", [[['c', 'f', 'd', 'b']], [['e', 'a', 'd', 'b']]])
assert_all(cursor, "SELECT v[0] FROM test", [['e'], ['c']])
assert_all(cursor, "SELECT v[3] FROM test", [['b'], ['b']])
assert_invalid(cursor, "SELECT v[-1] FROM test")
assert_invalid(cursor, "SELECT v[5] FROM test")
assert_all(cursor, "SELECT v[1..3] FROM test", [['a', 'd', 'b'], ['f', 'd', 'b']])
assert_all(cursor, "SELECT v[0..2] FROM test", [['e', 'a', 'd'], ['c', 'f', 'd']])
assert_invalid(cursor, "SELECT v[0..4] FROM test")
assert_invalid(cursor, "SELECT v[2..0] FROM test")
assert_all(cursor, "SELECT sizeof(v) FROM test", [[4], [4]])
def bug_8558_test(self):
cursor = self.prepare()
node1 = self.cluster.nodelist()[0]
cursor.execute("CREATE KEYSPACE space1 WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}")
cursor.execute("CREATE TABLE space1.table1(a int, b int, c text,primary key(a,b))")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE space1.table1")
cursor.execute("INSERT INTO space1.table1(a,b,c) VALUES(1,1,'1')")
node1.nodetool('flush')
cursor.execute("DELETE FROM space1.table1 where a=1 and b=1")
node1.nodetool('flush')
assert_none(cursor, "select * from space1.table1 where a=1 and b=1")
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12457',
flaky=True)
def bug_5732_test(self):
cursor = self.prepare(use_cache=True)
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v int,
)
""")
if self.node_version_above('2.1'):
cursor.execute("ALTER TABLE test WITH caching = {'keys': 'ALL', 'rows_per_partition': 'ALL'}")
else:
cursor.execute("ALTER TABLE test WITH CACHING='ALL'")
cursor.execute("INSERT INTO test (k,v) VALUES (0,0)")
cursor.execute("INSERT INTO test (k,v) VALUES (1,1)")
cursor.execute("CREATE INDEX testindex on test(v)")
# wait for the index to be fully built
check_for_index_sessions = tuple(self.patient_exclusive_cql_connection(node) for node in self.cluster.nodelist())
index_query = (
"""SELECT * FROM system_schema.indexes WHERE keyspace_name = 'ks' AND table_name = 'test' AND index_name = 'testindex'"""
if self.node_version_above('3.0') else
"""SELECT * FROM system."IndexInfo" WHERE table_name = 'ks' AND index_name = 'test.testindex'"""
)
start = time.time()
while True:
results = [list(session.execute(index_query)) for session in check_for_index_sessions]
debug(results)
if all(results):
break
if time.time() - start > 10.0:
failure_info_query = (
'SELECT * FROM system_schema.indexes'
if self.node_version_above('3.0') else
'SELECT * FROM system."IndexInfo"'
)
raise Exception("Failed to build secondary index within ten seconds: %s" % (list(cursor.execute(failure_info_query))))
time.sleep(0.1)
assert_all(cursor, "SELECT k FROM test WHERE v = 0", [[0]])
self.cluster.stop()
time.sleep(0.5)
self.cluster.start(wait_for_binary_proto=True)
time.sleep(0.5)
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
assert_all(cursor, "SELECT k FROM ks.test WHERE v = 0", [[0]])
def bug_10652_test(self):
cursor = self.prepare()
cursor.execute("CREATE KEYSPACE foo WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}")
cursor.execute("CREATE TABLE foo.bar (k int PRIMARY KEY, v int)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
future = cursor.execute_async("INSERT INTO foo.bar(k, v) VALUES (0, 0)", trace=True)
future.result()
future.get_query_trace(max_wait=120)
self.cluster.flush()
assert_one(cursor, "SELECT * FROM foo.bar", [0, 0])
topology_specs = [
{'NODES': 3,
'RF': 3,
'CL': ConsistencyLevel.ALL},
{'NODES': 2,
'RF': 1},
]
specs = [dict(s, UPGRADE_PATH=p, __test__=True)
for s, p in itertools.product(topology_specs, build_upgrade_pairs())]
for spec in specs:
suffix = 'Nodes{num_nodes}RF{rf}_{pathname}'.format(num_nodes=spec['NODES'],
rf=spec['RF'],
pathname=spec['UPGRADE_PATH'].name)
gen_class_name = TestCQL.__name__ + suffix
assert_not_in(gen_class_name, globals())
upgrade_applies_to_env = RUN_STATIC_UPGRADE_MATRIX or spec['UPGRADE_PATH'].upgrade_meta.matches_current_env_version_family
globals()[gen_class_name] = skipUnless(upgrade_applies_to_env, 'test not applicable to env.')(type(gen_class_name, (TestCQL,), spec))
| apache-2.0 | -267,808,268,547,125,220 | 43.327393 | 192 | 0.525782 | false | 3.693774 | true | false | false |
fullphat/redsquare | rs_pcd8544.py | 1 | 1295 | # RedSquare
# pcd8544.device handler
# LCD matrix used in the original Nokia 3310 phones
# Copyright (c) 2017 full phat products
#
import threading
import sos
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# init:
#
def init():
global lcd
try:
import pcd8544lib as lcd
sos.sos_print("Initialising device...")
lcd.LCDinit()
lcd.LCDprint("RSOS 2.1")
lcd.LCDprint("READY")
return True
except:
sos.sos_fail("Couldn't load pcd8544lib")
return False
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# device handler
# return bool,string (True,"OK") if query was handled, or false otherwise
#
def handle(queryDict, apiVersion=0, unit=0):
# check to see if our thread is still running, if so
# this means we're scrolling a message. For now we
# fail and return a 'device busy' message...
# get supplied info (default to empty strings so the checks below never
# reference an undefined name when a key is missing)...
_device = queryDict['device'][0] if 'device' in queryDict else ''
_mode = queryDict['mode'][0] if 'mode' in queryDict else ''
_text = queryDict['text'][0] if 'text' in queryDict else ''
# final checks...
if _text == "":
return (False, "Nothing to display")
sos.sos_print("Unit is " + str(unit))
lcd.LCDprint(_text)
return (True, "OK")
| mit | -4,017,047,913,291,437,600 | 20.327586 | 73 | 0.562934 | false | 3.061466 | false | false | false |
leppa/home-assistant | homeassistant/components/ring/light.py | 1 | 2991 | """This component provides HA switch support for Ring Door Bell/Chimes."""
from datetime import timedelta
import logging
from homeassistant.components.light import Light
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.util.dt as dt_util
from . import DATA_RING_STICKUP_CAMS, SIGNAL_UPDATE_RING
_LOGGER = logging.getLogger(__name__)
# It takes a few seconds for the API to correctly return an update indicating
# that the changes have been made. Once we request a change (i.e. a light
# being turned on) we simply wait for this time delta before we allow
# updates to take place.
SKIP_UPDATES_DELAY = timedelta(seconds=5)
ON_STATE = "on"
OFF_STATE = "off"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Create the lights for the Ring devices."""
cameras = hass.data[DATA_RING_STICKUP_CAMS]
lights = []
for device in cameras:
if device.has_capability("light"):
lights.append(RingLight(device))
add_entities(lights, True)
class RingLight(Light):
"""Creates a switch to turn the ring cameras light on and off."""
def __init__(self, device):
"""Initialize the light."""
self._device = device
self._unique_id = self._device.id
self._light_on = False
self._no_updates_until = dt_util.utcnow()
async def async_added_to_hass(self):
"""Register callbacks."""
async_dispatcher_connect(self.hass, SIGNAL_UPDATE_RING, self._update_callback)
@callback
def _update_callback(self):
"""Call update method."""
_LOGGER.debug("Updating Ring light %s (callback)", self.name)
self.async_schedule_update_ha_state(True)
@property
def name(self):
"""Name of the light."""
return f"{self._device.name} light"
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def should_poll(self):
"""Update controlled via the hub."""
return False
@property
def is_on(self):
"""If the switch is currently on or off."""
return self._light_on
def _set_light(self, new_state):
"""Update light state, and causes HASS to correctly update."""
self._device.lights = new_state
self._light_on = new_state == ON_STATE
self._no_updates_until = dt_util.utcnow() + SKIP_UPDATES_DELAY
self.async_schedule_update_ha_state(True)
def turn_on(self, **kwargs):
"""Turn the light on for 30 seconds."""
self._set_light(ON_STATE)
def turn_off(self, **kwargs):
"""Turn the light off."""
self._set_light(OFF_STATE)
def update(self):
"""Update current state of the light."""
if self._no_updates_until > dt_util.utcnow():
_LOGGER.debug("Skipping update...")
return
self._light_on = self._device.lights == ON_STATE
| apache-2.0 | 2,940,559,475,843,706,400 | 29.212121 | 86 | 0.640923 | false | 3.874352 | false | false | false |
shaz13/oppia | core/domain/email_manager.py | 1 | 36969 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config properties and functions for managing email notifications."""
import datetime
import logging
from core.domain import config_domain
from core.domain import html_cleaner
from core.domain import rights_manager
from core.domain import subscription_services
from core.domain import user_services
from core.platform import models
import feconf
(email_models,) = models.Registry.import_models([models.NAMES.email])
app_identity_services = models.Registry.import_app_identity_services()
email_services = models.Registry.import_email_services()
transaction_services = models.Registry.import_transaction_services()
# Stub for logging.error(), so that it can be swapped out in tests.
def log_new_error(*args, **kwargs):
logging.error(*args, **kwargs)
EMAIL_HTML_BODY_SCHEMA = {
'type': 'unicode',
'ui_config': {
'rows': 20,
}
}
EMAIL_CONTENT_SCHEMA = {
'type': 'dict',
'properties': [{
'name': 'subject',
'schema': {
'type': 'unicode',
},
}, {
'name': 'html_body',
'schema': EMAIL_HTML_BODY_SCHEMA,
}],
}
EMAIL_SENDER_NAME = config_domain.ConfigProperty(
'email_sender_name', {'type': 'unicode'},
'The default sender name for outgoing emails.', 'Site Admin')
EMAIL_FOOTER = config_domain.ConfigProperty(
'email_footer', {'type': 'unicode', 'ui_config': {'rows': 5}},
'The footer to append to all outgoing emails. (This should be written in '
'HTML and include an unsubscribe link.)',
'You can change your email preferences via the '
'<a href="https://www.example.com">Preferences</a> page.')
_PLACEHOLDER_SUBJECT = 'THIS IS A PLACEHOLDER.'
_PLACEHOLDER_HTML_BODY = 'THIS IS A <b>PLACEHOLDER</b> AND SHOULD BE REPLACED.'
SIGNUP_EMAIL_CONTENT = config_domain.ConfigProperty(
'signup_email_content', EMAIL_CONTENT_SCHEMA,
'Content of email sent after a new user signs up. (The email body should '
'be written with HTML and not include a salutation or footer.) These '
'emails are only sent if the functionality is enabled in feconf.py.',
{
'subject': _PLACEHOLDER_SUBJECT,
'html_body': _PLACEHOLDER_HTML_BODY,
})
EXPLORATION_ROLE_MANAGER = 'manager rights'
EXPLORATION_ROLE_EDITOR = 'editor rights'
EXPLORATION_ROLE_PLAYTESTER = 'playtest access'
EDITOR_ROLE_EMAIL_HTML_ROLES = {
rights_manager.ROLE_OWNER: EXPLORATION_ROLE_MANAGER,
rights_manager.ROLE_EDITOR: EXPLORATION_ROLE_EDITOR,
rights_manager.ROLE_VIEWER: EXPLORATION_ROLE_PLAYTESTER
}
_EDITOR_ROLE_EMAIL_HTML_RIGHTS = {
'can_manage': '<li>Change the exploration permissions</li><br>',
'can_edit': '<li>Edit the exploration</li><br>',
'can_play': '<li>View and playtest the exploration</li><br>'
}
EDITOR_ROLE_EMAIL_RIGHTS_FOR_ROLE = {
EXPLORATION_ROLE_MANAGER: (
_EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_manage'] +
_EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_edit'] +
_EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_play']),
EXPLORATION_ROLE_EDITOR: (
_EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_edit'] +
_EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_play']),
EXPLORATION_ROLE_PLAYTESTER: _EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_play']
}
PUBLICIZE_EXPLORATION_EMAIL_HTML_BODY = config_domain.ConfigProperty(
'publicize_exploration_email_html_body', EMAIL_HTML_BODY_SCHEMA,
'Default content for the email sent after an exploration is publicized by '
'a moderator. These emails are only sent if the functionality is enabled '
'in feconf.py. Leave this field blank if emails should not be sent.',
'Congratulations, your exploration has been featured in the Oppia '
'library!')
UNPUBLISH_EXPLORATION_EMAIL_HTML_BODY = config_domain.ConfigProperty(
'unpublish_exploration_email_html_body', EMAIL_HTML_BODY_SCHEMA,
'Default content for the email sent after an exploration is unpublished '
'by a moderator. These emails are only sent if the functionality is '
'enabled in feconf.py. Leave this field blank if emails should not be '
'sent.',
'I\'m writing to inform you that I have unpublished the above '
'exploration.')
SENDER_VALIDATORS = {
feconf.EMAIL_INTENT_SIGNUP: (lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_PUBLICIZE_EXPLORATION: (
lambda x: rights_manager.Actor(x).is_moderator()),
feconf.EMAIL_INTENT_UNPUBLISH_EXPLORATION: (
lambda x: rights_manager.Actor(x).is_moderator()),
feconf.EMAIL_INTENT_DAILY_BATCH: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_EDITOR_ROLE_NOTIFICATION: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_SUGGESTION_NOTIFICATION: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_SUBSCRIPTION_NOTIFICATION: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_QUERY_STATUS_NOTIFICATION: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_MARKETING: (
lambda x: rights_manager.Actor(x).is_admin()),
feconf.EMAIL_INTENT_DELETE_EXPLORATION: (
lambda x: rights_manager.Actor(x).is_moderator()),
feconf.EMAIL_INTENT_REPORT_BAD_CONTENT: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.BULK_EMAIL_INTENT_MARKETING: (
lambda x: user_services.get_username(x) in
config_domain.WHITELISTED_EMAIL_SENDERS.value),
feconf.BULK_EMAIL_INTENT_IMPROVE_EXPLORATION: (
lambda x: user_services.get_username(x) in
config_domain.WHITELISTED_EMAIL_SENDERS.value),
feconf.BULK_EMAIL_INTENT_CREATE_EXPLORATION: (
lambda x: user_services.get_username(x) in
config_domain.WHITELISTED_EMAIL_SENDERS.value),
feconf.BULK_EMAIL_INTENT_CREATOR_REENGAGEMENT: (
lambda x: user_services.get_username(x) in
config_domain.WHITELISTED_EMAIL_SENDERS.value),
feconf.BULK_EMAIL_INTENT_LEARNER_REENGAGEMENT: (
lambda x: user_services.get_username(x) in
config_domain.WHITELISTED_EMAIL_SENDERS.value),
feconf.BULK_EMAIL_INTENT_TEST: (
lambda x: user_services.get_username(x) in
config_domain.WHITELISTED_EMAIL_SENDERS.value)
}
def _require_sender_id_is_valid(intent, sender_id):
"""Ensure that the sender ID is valid, based on the email's intent.
Many emails are only allowed to be sent by a certain user or type of user,
e.g. 'admin' or an admin/moderator. This function will raise an exception
if the given sender is not allowed to send this type of email.
Args:
intent: str. The intent string, i.e. the purpose of the email.
Valid intent strings are defined in feconf.py.
sender_id: str. The ID of the user sending the email.
Raises:
Exception: The email intent is invalid.
Exception: The sender_id is not appropriate for the given intent.
"""
if intent not in SENDER_VALIDATORS:
raise Exception('Invalid email intent string: %s' % intent)
else:
if not SENDER_VALIDATORS[intent](sender_id):
logging.error(
'Invalid sender_id %s for email with intent \'%s\'' %
(sender_id, intent))
raise Exception(
'Invalid sender_id for email with intent \'%s\'' % intent)
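# Illustrative (hypothetical) calls, not part of the original module:
# _require_sender_id_is_valid(feconf.EMAIL_INTENT_SIGNUP, feconf.SYSTEM_COMMITTER_ID) # passes
# _require_sender_id_is_valid('not_a_real_intent', 'some_user_id') # raises Exception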
def _send_email(
recipient_id, sender_id, intent, email_subject, email_html_body,
sender_email, bcc_admin=False, sender_name=None, reply_to_id=None):
"""Sends an email to the given recipient.
This function should be used for sending all user-facing emails.
Raises an Exception if the sender_id is not appropriate for the given
intent. Currently we support only system-generated emails and emails
initiated by moderator actions.
Args:
recipient_id: str. The user ID of the recipient.
sender_id: str. The user ID of the sender.
intent: str. The intent string for the email, i.e. the purpose/type.
email_subject: str. The subject of the email.
email_html_body: str. The body (message) of the email.
sender_email: str. The sender's email address.
bcc_admin: bool. Whether to send a copy of the email to the admin's
email address.
sender_name: str or None. The name to be shown in the "sender" field of
the email.
reply_to_id: str or None. The unique reply-to id used in reply-to email
address sent to recipient.
"""
if sender_name is None:
sender_name = EMAIL_SENDER_NAME.value
_require_sender_id_is_valid(intent, sender_id)
recipient_email = user_services.get_email_from_user_id(recipient_id)
cleaned_html_body = html_cleaner.clean(email_html_body)
if cleaned_html_body != email_html_body:
log_new_error(
'Original email HTML body does not match cleaned HTML body:\n'
'Original:\n%s\n\nCleaned:\n%s\n' %
(email_html_body, cleaned_html_body))
return
raw_plaintext_body = cleaned_html_body.replace('<br/>', '\n').replace(
'<br>', '\n').replace('<li>', '<li>- ').replace('</p><p>', '</p>\n<p>')
cleaned_plaintext_body = html_cleaner.strip_html_tags(raw_plaintext_body)
if email_models.SentEmailModel.check_duplicate_message(
recipient_id, email_subject, cleaned_plaintext_body):
log_new_error(
'Duplicate email:\n'
'Details:\n%s %s\n%s\n\n' %
(recipient_id, email_subject, cleaned_plaintext_body))
return
def _send_email_in_transaction():
sender_name_email = '%s <%s>' % (sender_name, sender_email)
email_services.send_mail(
sender_name_email, recipient_email, email_subject,
cleaned_plaintext_body, cleaned_html_body, bcc_admin,
reply_to_id=reply_to_id)
email_models.SentEmailModel.create(
recipient_id, recipient_email, sender_id, sender_name_email, intent,
email_subject, cleaned_html_body, datetime.datetime.utcnow())
return transaction_services.run_in_transaction(_send_email_in_transaction)
def _send_bulk_mail(
recipient_ids, sender_id, intent, email_subject, email_html_body,
sender_email, sender_name, instance_id=None):
"""Sends an email to all given recipients.
Args:
recipient_ids: list(str). The user IDs of the email recipients.
sender_id: str. The ID of the user sending the email.
intent: str. The intent string, i.e. the purpose of the email.
email_subject: str. The subject of the email.
email_html_body: str. The body (message) of the email.
sender_email: str. The sender's email address.
sender_name: str. The name to be shown in the "sender" field of the
email.
instance_id: str or None. The ID of the BulkEmailModel entity instance.
"""
_require_sender_id_is_valid(intent, sender_id)
recipients_settings = user_services.get_users_settings(recipient_ids)
recipient_emails = [user.email for user in recipients_settings]
cleaned_html_body = html_cleaner.clean(email_html_body)
if cleaned_html_body != email_html_body:
log_new_error(
'Original email HTML body does not match cleaned HTML body:\n'
'Original:\n%s\n\nCleaned:\n%s\n' %
(email_html_body, cleaned_html_body))
return
raw_plaintext_body = cleaned_html_body.replace('<br/>', '\n').replace(
'<br>', '\n').replace('<li>', '<li>- ').replace('</p><p>', '</p>\n<p>')
cleaned_plaintext_body = html_cleaner.strip_html_tags(raw_plaintext_body)
def _send_bulk_mail_in_transaction(instance_id=None):
sender_name_email = '%s <%s>' % (sender_name, sender_email)
email_services.send_bulk_mail(
sender_name_email, recipient_emails, email_subject,
cleaned_plaintext_body, cleaned_html_body)
if instance_id is None:
instance_id = email_models.BulkEmailModel.get_new_id('')
email_models.BulkEmailModel.create(
instance_id, recipient_ids, sender_id, sender_name_email, intent,
email_subject, cleaned_html_body, datetime.datetime.utcnow())
return transaction_services.run_in_transaction(
_send_bulk_mail_in_transaction, instance_id)
def send_mail_to_admin(email_subject, email_body):
"""Send an email to the admin email address.
The email is sent to the ADMIN_EMAIL_ADDRESS set in feconf.py.
Args:
email_subject: str. Subject of the email.
email_body: str. Body (message) of the email.
"""
app_id = app_identity_services.get_application_id()
body = '(Sent from %s)\n\n%s' % (app_id, email_body)
email_services.send_mail(
feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS, email_subject,
body, body.replace('\n', '<br/>'), bcc_admin=False)
def send_post_signup_email(user_id):
"""Sends a post-signup email to the given user.
Raises an exception if emails are not allowed to be sent to users (i.e.
feconf.CAN_SEND_EMAILS is False).
Args:
user_id: str. User ID of the user that signed up.
"""
for key, content in SIGNUP_EMAIL_CONTENT.value.iteritems():
if content == SIGNUP_EMAIL_CONTENT.default_value[key]:
log_new_error(
'Please ensure that the value for the admin config property '
'SIGNUP_EMAIL_CONTENT is set, before allowing post-signup '
'emails to be sent.')
return
user_settings = user_services.get_user_settings(user_id)
email_subject = SIGNUP_EMAIL_CONTENT.value['subject']
email_body = 'Hi %s,<br><br>%s<br><br>%s' % (
user_settings.username,
SIGNUP_EMAIL_CONTENT.value['html_body'],
EMAIL_FOOTER.value)
_send_email(
user_id, feconf.SYSTEM_COMMITTER_ID, feconf.EMAIL_INTENT_SIGNUP,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def require_valid_intent(intent):
"""Checks if the given intent is valid, and raises an exception if it is
not.
Raises:
Exception: The given intent did not match an entry in
feconf.VALID_MODERATOR_ACTIONS.
"""
if intent not in feconf.VALID_MODERATOR_ACTIONS:
raise Exception('Unrecognized email intent: %s' % intent)
def _get_email_config(intent):
"""Return the default body for the email type matching the given moderator
action intent.
Args:
intent: str. The intent string (cause/purpose) of the email.
Returns:
str. The default body for the email type matching the given moderator
action intent.
"""
require_valid_intent(intent)
return config_domain.Registry.get_config_property(
feconf.VALID_MODERATOR_ACTIONS[intent]['email_config'])
def get_draft_moderator_action_email(intent):
"""Returns a draft of the text of the body for an email sent immediately
following a moderator action. An empty body is a signal to the frontend
that no email will be sent.
Args:
intent: str. The intent string (cause/purpose) of the email.
Returns:
str. Draft of the email body for an email sent after a moderator action,
or an empty string if no email should be sent.
"""
try:
require_moderator_email_prereqs_are_satisfied()
return _get_email_config(intent).value
except Exception:
return ''
def require_moderator_email_prereqs_are_satisfied():
"""Raises an exception if, for any reason, moderator emails cannot be sent.
Raises:
Exception: feconf.REQUIRE_EMAIL_ON_MODERATOR_ACTION is False.
Exception: feconf.CAN_SEND_EMAILS is False.
"""
if not feconf.REQUIRE_EMAIL_ON_MODERATOR_ACTION:
raise Exception(
'For moderator emails to be sent, please ensure that '
'REQUIRE_EMAIL_ON_MODERATOR_ACTION is set to True.')
if not feconf.CAN_SEND_EMAILS:
raise Exception(
'For moderator emails to be sent, please ensure that '
'CAN_SEND_EMAILS is set to True.')
def send_moderator_action_email(
sender_id, recipient_id, intent, exploration_title, email_body):
"""Sends a email immediately following a moderator action (publicize,
unpublish, delete) to the given user.
Raises an exception if emails are not allowed to be sent to users (i.e.
feconf.CAN_SEND_EMAILS is False).
Args:
sender_id: str. User ID of the sender.
recipient_id: str. User ID of the recipient.
intent: str. The intent string (cause/purpose) of the email.
exploration_title: str. The title of the exploration on which the
moderator action was taken.
email_body: str. The email content/message.
"""
require_moderator_email_prereqs_are_satisfied()
email_config = feconf.VALID_MODERATOR_ACTIONS[intent]
recipient_user_settings = user_services.get_user_settings(recipient_id)
sender_user_settings = user_services.get_user_settings(sender_id)
email_subject = feconf.VALID_MODERATOR_ACTIONS[intent]['email_subject_fn'](
exploration_title)
email_salutation_html = email_config['email_salutation_html_fn'](
recipient_user_settings.username)
email_signoff_html = email_config['email_signoff_html_fn'](
sender_user_settings.username)
full_email_content = (
'%s<br><br>%s<br><br>%s<br><br>%s' % (
email_salutation_html, email_body, email_signoff_html,
EMAIL_FOOTER.value))
_send_email(
recipient_id, sender_id, intent, email_subject, full_email_content,
feconf.SYSTEM_EMAIL_ADDRESS, bcc_admin=True)
def send_role_notification_email(
inviter_id, recipient_id, recipient_role, exploration_id,
exploration_title):
"""Sends a email when a new user is given activity rights (Manager, Editor,
Viewer) to an exploration by creator of exploration.
Email will only be sent if recipient wants to receive these emails (i.e.
'can_receive_editor_role_email' is set True in recipient's preferences).
Args:
inviter_id: str. ID of the user who invited the recipient to the new
role.
recipient_id: str. User ID of the recipient.
recipient_role: str. Role given to the recipient. Must be defined in
EDITOR_ROLE_EMAIL_HTML_ROLES.
exploration_id: str. ID of the exploration for which the recipient has
been given the new role.
exploration_title: str. Title of the exploration for which the recipient
has been given the new role.
Raises:
Exception: The role is invalid (i.e. not defined in
EDITOR_ROLE_EMAIL_HTML_ROLES).
"""
# Editor role email body and email subject templates.
email_subject_template = (
'%s - invitation to collaborate')
email_body_template = (
'Hi %s,<br>'
'<br>'
'<b>%s</b> has granted you %s to their exploration, '
'"<a href="http://www.oppia.org/create/%s">%s</a>", on Oppia.org.<br>'
'<br>'
'This allows you to:<br>'
'<ul>%s</ul>'
'You can find the exploration '
'<a href="http://www.oppia.org/create/%s">here</a>.<br>'
'<br>'
'Thanks, and happy collaborating!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>%s')
# Return from here if sending email is turned off.
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
# Return from here if sending editor role emails is disabled.
if not feconf.CAN_SEND_EDITOR_ROLE_EMAILS:
log_new_error('This app cannot send editor role emails to users.')
return
recipient_user_settings = user_services.get_user_settings(recipient_id)
inviter_user_settings = user_services.get_user_settings(inviter_id)
recipient_preferences = user_services.get_email_preferences(recipient_id)
if not recipient_preferences.can_receive_editor_role_email:
# Do not send email if recipient has declined.
return
if recipient_role not in EDITOR_ROLE_EMAIL_HTML_ROLES:
raise Exception(
'Invalid role: %s' % recipient_role)
role_description = EDITOR_ROLE_EMAIL_HTML_ROLES[recipient_role]
rights_html = EDITOR_ROLE_EMAIL_RIGHTS_FOR_ROLE[role_description]
email_subject = email_subject_template % exploration_title
email_body = email_body_template % (
recipient_user_settings.username, inviter_user_settings.username,
role_description, exploration_id, exploration_title, rights_html,
exploration_id, EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_EDITOR_ROLE_NOTIFICATION, email_subject, email_body,
feconf.NOREPLY_EMAIL_ADDRESS,
sender_name=inviter_user_settings.username)
def send_emails_to_subscribers(creator_id, exploration_id, exploration_title):
"""Sends an email to all the subscribers of the creators when the creator
publishes an exploration.
Args:
creator_id: str. The id of the creator who has published an exploration
and to whose subscribers we are sending emails.
exploration_id: str. The id of the exploration which the creator has
published.
exploration_title: str. The title of the exploration which the creator
has published.
"""
creator_name = user_services.get_username(creator_id)
email_subject = ('%s has published a new exploration!' % creator_name)
email_body_template = (
'Hi %s,<br>'
'<br>'
'%s has published a new exploration! You can play it here: '
'<a href="https://www.oppia.org/explore/%s">%s</a><br>'
'<br>'
'Thanks, and happy learning!<br>'
'<br>'
'Best wishes,<br>'
'- The Oppia Team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
if not feconf.CAN_SEND_SUBSCRIPTION_EMAILS:
log_new_error('This app cannot send subscription emails to users.')
return
recipient_list = subscription_services.get_all_subscribers_of_creator(
creator_id)
recipients_usernames = user_services.get_usernames(recipient_list)
recipients_preferences = user_services.get_users_email_preferences(
recipient_list)
for index, username in enumerate(recipients_usernames):
if recipients_preferences[index].can_receive_subscription_email:
email_body = email_body_template % (
username, creator_name, exploration_id,
exploration_title, EMAIL_FOOTER.value)
_send_email(
recipient_list[index], feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_SUBSCRIPTION_NOTIFICATION,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def send_feedback_message_email(recipient_id, feedback_messages):
"""Sends an email when creator receives feedback message to an exploration.
Args:
recipient_id: str. User ID of recipient.
feedback_messages: dict. Contains feedback messages. Example:
{
'exploration_id': {
'title': 'Exploration 1234',
'messages': ['Feedback message 1', 'Feedback message 2']
}
}
"""
email_subject_template = (
'You\'ve received %s new message%s on your explorations')
email_body_template = (
'Hi %s,<br>'
'<br>'
'You\'ve received %s new message%s on your Oppia explorations:<br>'
'<ul>%s</ul>'
'You can view and reply to your messages from your '
'<a href="https://www.oppia.org/creator_dashboard">dashboard</a>.'
'<br>'
'<br>Thanks, and happy teaching!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
if not feconf.CAN_SEND_FEEDBACK_MESSAGE_EMAILS:
log_new_error('This app cannot send feedback message emails to users.')
return
if not feedback_messages:
return
recipient_user_settings = user_services.get_user_settings(recipient_id)
messages_html = ''
count_messages = 0
for exp_id, reference in feedback_messages.iteritems():
messages_html += (
'<li><a href="https://www.oppia.org/create/%s#/feedback">'
'%s</a>:<br><ul>' % (exp_id, reference['title']))
for message in reference['messages']:
messages_html += ('<li>%s<br></li>' % message)
count_messages += 1
messages_html += '</ul></li>'
email_subject = email_subject_template % (
(count_messages, 's') if count_messages > 1 else ('a', ''))
email_body = email_body_template % (
recipient_user_settings.username, count_messages if count_messages > 1
else 'a', 's' if count_messages > 1 else '', messages_html,
EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def can_users_receive_thread_email(
recipient_ids, exploration_id, has_suggestion):
"""Returns if users can receive email.
Args:
recipient_ids: list(str). IDs of persons that should receive the email.
exploration_id: str. ID of exploration that received new message.
has_suggestion: bool. True if thread contains suggestion.
Returns:
list(bool). One entry per recipient: True if that user can receive the email, False otherwise.
"""
users_global_prefs = (
user_services.get_users_email_preferences(recipient_ids))
users_exploration_prefs = (
user_services.get_users_email_preferences_for_exploration(
recipient_ids, exploration_id))
zipped_preferences = zip(users_global_prefs, users_exploration_prefs)
result = []
if has_suggestion:
for user_global_prefs, user_exploration_prefs in zipped_preferences:
result.append(
user_global_prefs.can_receive_feedback_message_email
and not user_exploration_prefs.mute_suggestion_notifications)
else:
for user_global_prefs, user_exploration_prefs in zipped_preferences:
result.append(
user_global_prefs.can_receive_feedback_message_email
and not user_exploration_prefs.mute_feedback_notifications)
return result
def send_suggestion_email(
exploration_title, exploration_id, author_id, recipient_list):
"""Send emails to notify the given recipients about new suggestion.
Each recipient will only be emailed if their email preferences allow for
incoming feedback message emails.
Args:
exploration_title: str. Title of the exploration with the new
suggestion.
exploration_id: str. The ID of the exploration with the new suggestion.
author_id: str. The user ID of the author of the suggestion.
recipient_list: list(str). The user IDs of the email recipients.
"""
email_subject = 'New suggestion for "%s"' % exploration_title
email_body_template = (
'Hi %s,<br>'
'%s has submitted a new suggestion for your Oppia exploration, '
'<a href="https://www.oppia.org/create/%s">"%s"</a>.<br>'
'You can accept or reject this suggestion by visiting the '
'<a href="https://www.oppia.org/create/%s#/feedback">feedback page</a> '
'for your exploration.<br>'
'<br>'
'Thanks!<br>'
'- The Oppia Team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
if not feconf.CAN_SEND_FEEDBACK_MESSAGE_EMAILS:
log_new_error('This app cannot send feedback message emails to users.')
return
author_settings = user_services.get_user_settings(author_id)
can_users_receive_email = (
can_users_receive_thread_email(recipient_list, exploration_id, True))
for index, recipient_id in enumerate(recipient_list):
recipient_user_settings = user_services.get_user_settings(recipient_id)
if can_users_receive_email[index]:
# Send email only if recipient wants to receive.
email_body = email_body_template % (
recipient_user_settings.username, author_settings.username,
exploration_id, exploration_title, exploration_id,
EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_SUGGESTION_NOTIFICATION,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def send_instant_feedback_message_email(
recipient_id, sender_id, message, email_subject, exploration_title,
exploration_id, thread_title, reply_to_id=None):
"""Send an email when a new message is posted to a feedback thread, or when
the thread's status is changed.
Args:
recipient_id: str. The user ID of the recipient.
sender_id: str. The user ID of the sender.
message: str. The message text or status change text from the sender.
email_subject: str. The subject line to be sent in the email.
exploration_title: str. The title of the exploration.
exploration_id: str. ID of the exploration the feedback thread is about.
thread_title: str. The title of the feedback thread.
reply_to_id: str or None. The unique reply-to id used in reply-to email
sent to recipient.
"""
email_body_template = (
'Hi %s,<br><br>'
'New update to thread "%s" on '
'<a href="https://www.oppia.org/create/%s#/feedback">%s</a>:<br>'
'<ul><li>%s: %s<br></li></ul>'
'(You received this message because you are a '
'participant in this thread.)<br><br>'
'Best wishes,<br>'
'The Oppia team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
if not feconf.CAN_SEND_FEEDBACK_MESSAGE_EMAILS:
log_new_error('This app cannot send feedback message emails to users.')
return
sender_settings = user_services.get_user_settings(sender_id)
recipient_settings = user_services.get_user_settings(recipient_id)
recipient_preferences = user_services.get_email_preferences(recipient_id)
if recipient_preferences.can_receive_feedback_message_email:
email_body = email_body_template % (
recipient_settings.username, thread_title, exploration_id,
exploration_title, sender_settings.username, message,
EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION, email_subject,
email_body, feconf.NOREPLY_EMAIL_ADDRESS, reply_to_id=reply_to_id)
def send_flag_exploration_email(
exploration_title, exploration_id, reporter_id, report_text):
"""Send an email to all moderators when an exploration is flagged.
Args:
exploration_title: str. The title of the flagged exploration.
exploration_id: str. The ID of the flagged exploration.
reporter_id: str. The user ID of the reporter.
report_text: str. The message entered by the reporter.
"""
email_subject = 'Exploration flagged by user: "%s"' % exploration_title
email_body_template = (
'Hello Moderator,<br>'
'%s has flagged exploration "%s" on the following '
'grounds: <br>'
'%s .<br>'
'You can modify the exploration by clicking '
'<a href="https://www.oppia.org/create/%s">here</a>.<br>'
'<br>'
'Thanks!<br>'
'- The Oppia Team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
email_body = email_body_template % (
user_services.get_user_settings(reporter_id).username,
exploration_title, report_text, exploration_id,
EMAIL_FOOTER.value)
recipient_list = config_domain.MODERATOR_IDS.value
for recipient_id in recipient_list:
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_REPORT_BAD_CONTENT,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def send_query_completion_email(recipient_id, query_id):
"""Send an email to the initiator of a bulk email query with a link to view
the query results.
Args:
recipient_id: str. The recipient ID.
query_id: str. The query ID.
"""
email_subject = 'Query %s has successfully completed' % query_id
email_body_template = (
'Hi %s,<br>'
'Your query with id %s has successfully completed its '
'execution. Visit the result page '
'<a href="https://www.oppia.org/emaildashboardresult/%s">here</a> '
'to see result of your query.<br><br>'
'Thanks!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>%s')
recipient_user_settings = user_services.get_user_settings(recipient_id)
email_body = email_body_template % (
recipient_user_settings.username, query_id, query_id,
EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_QUERY_STATUS_NOTIFICATION, email_subject,
email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def send_query_failure_email(recipient_id, query_id, query_params):
"""Send an email to the initiator of a failed bulk email query.
Args:
recipient_id: str. The recipient ID.
query_id: str. The query ID.
query_params: dict. The parameters of the query, as key:value.
"""
email_subject = 'Query %s has failed' % query_id
email_body_template = (
'Hi %s,<br>'
'Your query with id %s has failed due to error '
'during execution. '
'Please check the query parameters and submit query again.<br><br>'
'Thanks!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>%s')
recipient_user_settings = user_services.get_user_settings(recipient_id)
email_body = email_body_template % (
recipient_user_settings.username, query_id, EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_QUERY_STATUS_NOTIFICATION, email_subject,
email_body, feconf.NOREPLY_EMAIL_ADDRESS)
admin_email_subject = 'Query job has failed.'
admin_email_body_template = (
'Query job with %s query id has failed in its execution.\n'
'Query parameters:\n\n')
for key in sorted(query_params):
admin_email_body_template += '%s: %s\n' % (key, query_params[key])
admin_email_body = admin_email_body_template % query_id
send_mail_to_admin(admin_email_subject, admin_email_body)
def send_user_query_email(
sender_id, recipient_ids, email_subject, email_body, email_intent):
bulk_email_model_id = email_models.BulkEmailModel.get_new_id('')
sender_name = user_services.get_username(sender_id)
sender_email = user_services.get_email_from_user_id(sender_id)
_send_bulk_mail(
recipient_ids, sender_id, email_intent, email_subject, email_body,
sender_email, sender_name, bulk_email_model_id)
return bulk_email_model_id
def send_test_email_for_bulk_emails(tester_id, email_subject, email_body):
tester_name = user_services.get_username(tester_id)
tester_email = user_services.get_email_from_user_id(tester_id)
return _send_email(
tester_id, tester_id, feconf.BULK_EMAIL_INTENT_TEST,
email_subject, email_body, tester_email, sender_name=tester_name)
| apache-2.0 | -7,266,375,526,076,710,000 | 38.245223 | 80 | 0.651221 | false | 3.685108 | true | false | false |
bospetersen/h2o-3 | h2o-py/tests/testdir_algos/rf/pyunit_NOPASS_error_checkpointRF.py | 1 | 2494 | import sys, shutil
sys.path.insert(1, "../../../")
import h2o, tests
def cars_checkpoint(ip,port):
cars = h2o.upload_file(h2o.locate("smalldata/junit/cars_20mpg.csv"))
predictors = ["displacement","power","weight","acceleration","year"]
response_col = "economy"
# build first model
model1 = h2o.random_forest(x=cars[predictors],y=cars[response_col],ntrees=10,max_depth=2, min_rows=10)
# continue building the model
model2 = h2o.random_forest(x=cars[predictors],y=cars[response_col],ntrees=11,max_depth=3, min_rows=9,r2_stopping=0.8,
checkpoint=model1._id)
# erroneous, not MODIFIABLE_BY_CHECKPOINT_FIELDS
# PUBDEV-1833
# mtries
try:
model = h2o.random_forest(y=cars[response_col], x=cars[predictors],mtries=2,checkpoint=model1._id)
assert False, "Expected model-build to fail because mtries not modifiable by checkpoint"
except EnvironmentError:
assert True
# sample_rate
try:
model = h2o.random_forest(y=cars[response_col], x=cars[predictors],sample_rate=0.5,checkpoint=model1._id)
assert False, "Expected model-build to fail because sample_rate not modifiable by checkpoint"
except EnvironmentError:
assert True
# nbins_cats
try:
model = h2o.random_forest(y=cars[response_col], x=cars[predictors],nbins_cats=99,checkpoint=model1._id)
assert False, "Expected model-build to fail because nbins_cats not modifiable by checkpoint"
except EnvironmentError:
assert True
# nbins
try:
model = h2o.random_forest(y=cars[response_col], x=cars[predictors],nbins=99,checkpoint=model1._id)
assert False, "Expected model-build to fail because nbins not modifiable by checkpoint"
except EnvironmentError:
assert True
# balance_classes
try:
model = h2o.random_forest(y=cars[response_col], x=cars[predictors],balance_classes=True,checkpoint=model1._id)
assert False, "Expected model-build to fail because balance_classes not modifiable by checkpoint"
except EnvironmentError:
assert True
# nfolds
try:
model = h2o.random_forest(y=cars[response_col], x=cars[predictors],nfolds=3,checkpoint=model1._id)
assert False, "Expected model-build to fail because nfolds not modifiable by checkpoint"
except EnvironmentError:
assert True
if __name__ == "__main__":
tests.run_test(sys.argv, cars_checkpoint)
| apache-2.0 | -1,062,366,960,171,115,100 | 37.369231 | 121 | 0.677626 | false | 3.402456 | false | false | false |
cmallwitz/Sunflower | application/plugins/find_file_extensions/size.py | 1 | 2603 | from gi.repository import Gtk
from plugin_base.find_extension import FindExtension
class SizeFindFiles(FindExtension):
"""Size extension for find files tool"""
def __init__(self, parent):
FindExtension.__init__(self, parent)
# create container
table = Gtk.Table(2, 4, False)
table.set_border_width(5)
table.set_col_spacings(5)
# create interface
self._adjustment_max = Gtk.Adjustment(value=50.0, lower=0.0, upper=100000.0, step_incr=0.1, page_incr=10.0)
self._adjustment_min = Gtk.Adjustment(value=0.0, lower=0.0, upper=10.0, step_incr=0.1, page_incr=10.0)
label = Gtk.Label(label='<b>{0}</b>'.format(_('Match file size')))
label.set_alignment(0.0, 0.5)
label.set_use_markup(True)
label_min = Gtk.Label(label=_('Minimum:'))
label_min.set_alignment(0, 0.5)
label_min_unit = Gtk.Label(label=_('MB'))
label_max = Gtk.Label(label=_('Maximum:'))
label_max.set_alignment(0, 0.5)
label_max_unit = Gtk.Label(label=_('MB'))
self._entry_max = Gtk.SpinButton(adjustment=self._adjustment_max, digits=2)
self._entry_min = Gtk.SpinButton(adjustment=self._adjustment_min, digits=2)
self._entry_max.connect('value-changed', self._max_value_changed)
self._entry_min.connect('value-changed', self._min_value_changed)
self._entry_max.connect('activate', self._parent.find_files)
self._entry_min.connect('activate', lambda entry: self._entry_max.grab_focus())
# pack interface
table.attach(label, 0, 3, 0, 1, xoptions=Gtk.AttachOptions.FILL)
table.attach(label_min, 0, 1, 1, 2, xoptions=Gtk.AttachOptions.FILL)
table.attach(self._entry_min, 1, 2, 1, 2, xoptions=Gtk.AttachOptions.FILL)
table.attach(label_min_unit, 2, 3, 1, 2, xoptions=Gtk.AttachOptions.FILL)
table.attach(label_max, 0, 1, 2, 3, xoptions=Gtk.AttachOptions.FILL)
table.attach(self._entry_max, 1, 2, 2, 3, xoptions=Gtk.AttachOptions.FILL)
table.attach(label_max_unit, 2, 3, 2, 3, xoptions=Gtk.AttachOptions.FILL)
self.vbox.pack_start(table, False, False, 0)
def _max_value_changed(self, entry):
"""Assign value to adjustment handler"""
self._adjustment_min.set_upper(entry.get_value())
def _min_value_changed(self, entry):
"""Assign value to adjustment handler"""
self._adjustment_max.set_lower(entry.get_value())
def get_title(self):
"""Return i18n title for extension"""
return _('Size')
def is_path_ok(self, path):
"""Check is specified path fits the cirteria"""
size = self._parent._provider.get_stat(path).size
size_max = self._entry_max.get_value() * 1048576
size_min = self._entry_min.get_value() * 1048576
return size_min < size < size_max
| gpl-3.0 | 2,342,140,416,426,739,700 | 36.724638 | 109 | 0.698425 | false | 2.8171 | false | false | false |
andrei-milea/unbiased | web/backend/urls.py | 1 | 1264 | """backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include, re_path
from django.contrib import admin
from django.contrib.auth import views as auth_views
from backend import views
urlpatterns = [
re_path(r'^$', views.home, name='home'),
re_path(r'^contribute/', views.contribute_page, name='contribute_page'),
re_path(r'^admin/', admin.site.urls),
re_path(r'^signup/$', views.signup, name='signup'),
re_path(r'^login/$', auth_views.LoginView.as_view(), name='login'),
re_path(r'^logout/$', auth_views.LogoutView.as_view(), name='logout'),
re_path(r'^oauth/', include('social_django.urls', namespace='social'))
]
| gpl-3.0 | 2,791,782,861,359,301,000 | 42.586207 | 79 | 0.689082 | false | 3.407008 | false | false | false |
emory-libraries/eulcore-history | src/eulcore/django/existdb/tests.py | 1 | 7136 | # file django/existdb/tests.py
#
# Copyright 2010 Emory University General Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lxml import etree
import os
import unittest
from urlparse import urlsplit, urlunsplit
from django.conf import settings
from eulcore import xmlmap
from eulcore.django.existdb.db import ExistDB
from eulcore.django.existdb.manager import Manager
from eulcore.django.existdb.models import XmlModel
from eulcore.django.existdb.templatetags.existdb import exist_matches
import eulcore.existdb as nondjangoexistdb
from eulcore.existdb.db import EXISTDB_NAMESPACE
from eulcore.xmlmap import XmlObject
# minimal testing here to confirm djangoified ExistDB works;
# more extensive tests are in test_existdb
class ExistDBTest(unittest.TestCase):
COLLECTION = settings.EXISTDB_TEST_COLLECTION
def setUp(self):
self.db = ExistDB()
self.db.createCollection(self.COLLECTION, True)
# rudimentary example of loading exist fixture from a file
module_path = os.path.split(__file__)[0]
fixture = os.path.join(module_path, 'exist_fixtures', 'hello.xml')
self.db.load(open(fixture), self.COLLECTION + '/hello.xml', True)
# save exist configurations modified by some tests
self._EXISTDB_SERVER_URL = getattr(settings, 'EXISTDB_SERVER_URL', None)
self._EXISTDB_SERVER_USER = getattr(settings, 'EXISTDB_SERVER_USER', None)
self._EXISTDB_SERVER_PASSWORD = getattr(settings, 'EXISTDB_SERVER_PASSWORD', None)
def tearDown(self):
self.db.removeCollection(self.COLLECTION)
# restore exist settings
setattr(settings, 'EXISTDB_SERVER_URL', self._EXISTDB_SERVER_URL)
setattr(settings, 'EXISTDB_SERVER_USER', self._EXISTDB_SERVER_USER)
setattr(settings, 'EXISTDB_SERVER_PASSWORD', self._EXISTDB_SERVER_PASSWORD)
def test_init(self):
self.assert_(isinstance(self.db, nondjangoexistdb.db.ExistDB))
self.assert_(isinstance(self.db, ExistDB))
def test_getDocument(self):
"""Retrieve document loaded via file fixture"""
xml = self.db.getDocument(self.COLLECTION + "/hello.xml")
self.assertEquals(xml, "<hello>World</hello>")
def test_failed_authentication_from_settings(self):
"""Check that initializing ExistDB with invalid django settings raises exception"""
try:
#passwords can be specified in localsettings.py
# overwrite (and then restore) to ensure that authentication fails
server_url = settings.EXISTDB_SERVER_URL
parts = urlsplit(settings.EXISTDB_SERVER_URL)
netloc = 'bad_user:bad_password@' + parts.hostname
if parts.port:
netloc += ':' + str(parts.port)
bad_uri = urlunsplit((parts.scheme, netloc, parts.path, parts.query, parts.fragment))
settings.EXISTDB_SERVER_URL = bad_uri
test_db = ExistDB()
self.assertRaises(nondjangoexistdb.db.ExistDBException,
test_db.hasCollection, self.COLLECTION)
finally:
settings.EXISTDB_SERVER_URL = server_url
def test_get_exist_url(self):
# test constructing url based on multiple possible configurations
user = settings.EXISTDB_SERVER_USER
pwd = settings.EXISTDB_SERVER_PASSWORD
scheme, sep, host = settings.EXISTDB_SERVER_URL.partition('//')
# with username & password
self.assertEqual(scheme + sep + user + ':' + pwd + '@' + host,
self.db._get_exist_url())
# username but no password
delattr(settings, 'EXISTDB_SERVER_PASSWORD')
self.assertEqual(scheme + sep + user + '@' + host, self.db._get_exist_url())
# no credentials
delattr(settings, 'EXISTDB_SERVER_USER')
self.assertEqual(settings.EXISTDB_SERVER_URL, self.db._get_exist_url())
class PartingBase(xmlmap.XmlObject):
'''A plain XmlObject comparable to how one might be defined in
production code.'''
exclamation = xmlmap.StringField('exclamation')
target = xmlmap.StringField('target')
class Parting(XmlModel, PartingBase):
'''An XmlModel can derive from an XmlObject to incorporate its
fields.'''
objects = Manager('/parting')
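# Illustrative (hypothetical) usage of the Manager-backed model, mirroring the test below:
# partings = Parting.objects.all() # XQuery against /parting in the configured eXist collection
# partings.count() # number of matching documents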
class ModelTest(unittest.TestCase):
COLLECTION = settings.EXISTDB_TEST_COLLECTION
def setUp(self):
self.db = ExistDB()
self.db.createCollection(self.COLLECTION, True)
module_path = os.path.split(__file__)[0]
fixture = os.path.join(module_path, 'exist_fixtures', 'goodbye-english.xml')
self.db.load(open(fixture), self.COLLECTION + '/goodbye-english.xml', True)
fixture = os.path.join(module_path, 'exist_fixtures', 'goodbye-french.xml')
self.db.load(open(fixture), self.COLLECTION + '/goodbye-french.xml', True)
def tearDown(self):
self.db.removeCollection(self.COLLECTION)
def test_manager(self):
partings = Parting.objects.all()
self.assertEquals(2, partings.count())
class ExistMatchTestCase(unittest.TestCase):
# test exist_match template tag explicitly
SINGLE_MATCH = """<abstract>Pitts v. <exist:match xmlns:exist="%s">Freeman</exist:match>
school desegregation case files</abstract>""" % EXISTDB_NAMESPACE
MULTI_MATCH = """<title>Pitts v. <exist:match xmlns:exist="%(ex)s">Freeman</exist:match>
<exist:match xmlns:exist="%(ex)s">school</exist:match> <exist:match xmlns:exist="%(ex)s">desegregation</exist:match>
case files</title>""" % {'ex': EXISTDB_NAMESPACE}
def setUp(self):
self.content = XmlObject(etree.fromstring(self.SINGLE_MATCH)) # placeholder
def test_single_match(self):
self.content.node = etree.fromstring(self.SINGLE_MATCH)
format = exist_matches(self.content)
self.assert_('Pitts v. <span class="exist-match">Freeman</span>'
in format, 'exist:match tag converted to span for highlighting')
def test_multiple_matches(self):
self.content.node = etree.fromstring(self.MULTI_MATCH)
format = exist_matches(self.content)
self.assert_('Pitts v. <span class="exist-match">Freeman</span>'
in format, 'first exist:match tag converted')
self.assert_('<span class="exist-match">school</span> <span class="exist-match">desegregation</span>'
in format, 'second and third exist:match tags converted')
| apache-2.0 | -6,928,753,692,045,739,000 | 40.987952 | 116 | 0.663817 | false | 3.871948 | true | false | false |
gwu-libraries/launchpad | lp/ui/management/commands/make_sitemap.py | 1 | 2292 | import gzip
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connections
def _newfile(counter):
"""Generate a new sitemap filename based on count."""
name = '%s/sitemap-%s.xml.gz' % (settings.SITEMAPS_DIR,
counter)
fp = gzip.open(name, 'wb')
fp.write("""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n""")
return fp
def _newurl(counter):
"""Generate the <loc> URL for a sitemap file based on count."""
return "%s/sitemap-%s.xml.gz" % (settings.SITEMAPS_BASE_URL, counter)
class Command(BaseCommand):
help = 'Generate sitemap files'
def handle(self, *args, **options):
# first, clear out the existing files
print 'Removing old files'
for old_file in os.listdir(settings.SITEMAPS_DIR):
os.remove('%s/%s' % (settings.SITEMAPS_DIR, old_file))
print 'Generating maps'
cursor = connections['voyager'].cursor()
query = """SELECT BIB_ID FROM bib_master
WHERE SUPPRESS_IN_OPAC = 'N'
"""
cursor.execute(query)
index_file = '%s/sitemap-index.xml' % settings.SITEMAPS_DIR
fp_index = open(index_file, 'wb')
fp_index.write("""<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n""")
i = j = 0
fp = _newfile(j)
line = "<sitemap><loc>%s</loc></sitemap>\n" % _newurl(j)
fp_index.write(line)
row = cursor.fetchone()
while row:
line = '<url><loc>%s/item/%s</loc></url>\n' % \
(settings.SITEMAPS_BASE_URL, row[0])
fp.write(line)
if i == 49990:
i = 0
j += 1
fp.write('</urlset>')
fp.close()
fp = _newfile(j)
line = "<sitemap><loc>%s</loc></sitemap>\n" % _newurl(j)
fp_index.write(line)
print '%s - %s' % (j, row[0])
else:
i += 1
row = cursor.fetchone()
if fp:
fp.write('</urlset>\n')
fp.close()
fp_index.write("""</sitemapindex>\n""")
fp_index.close()
| mit | 98,821,271,441,966,880 | 33.208955 | 73 | 0.538394 | false | 3.493902 | false | false | false |
nurnbeck/project-2-CMPUT-291 | ret_KEY.py | 1 | 1724 | import os
import time
import bsddb3 as bsddb
'''
Retrieve records with a given key
- Modified and simplified based on the old version
- Has the same format and assumption as ret_DATA()
Tested under DB_SIZE = 10
'''
DB_FILE = "/tmp/yishuo_db/sample_db"
SDB_FILE = "/tmp/yishuo_db/IndexFile"
def ret_KEY(filetype):
if filetype == 'btree':
db = bsddb.btopen(DB_FILE, 'r')
elif filetype == 'hash':
db = bsddb.hashopen(DB_FILE, 'r')
elif filetype == 'indexfile':
db = bsddb.btopen(DB_FILE, 'r')
indexfile = bsddb.hashopen(SDB_FILE, 'r')
else:
print("Unknown type, function terminated\n")
return
answers = open('answers', 'a')
result_lst = []
tkey = input("Enter the key you want to search > ")
tkey = tkey.encode(encoding = 'UTF-8')
start_time = time.time()
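    # Linear scan over every key in the database; the elapsed time measured
    # below reflects this brute-force lookup.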
for key in db.keys():
if tkey == key:
result_lst.append(key.decode(encoding = 'UTF-8'))
end_time = time.time()
elapse_time = (end_time - start_time) * 1000000
print("Result:")
if result_lst:
for key in result_lst:
print('Key:', key)
answers.write(key)
answers.write('\n')
key = key.encode(encoding = 'UTF-8')
data = db[key]
data = data.decode(encoding = 'UTF-8')
print('Data:', data)
answers.write(data)
answers.write('\n')
answers.write('\n')
else:
print("Data not found")
print()
print(len(result_lst), "record(s) received")
print("Used", elapse_time, "micro seconds")
print()
answers.close()
db.close()
if filetype == 'indexfile':
indexfile.close()
return
| mit | -4,234,728,709,192,996,000 | 25.9375 | 61 | 0.567285 | false | 3.448 | false | false | false |
biemond/biemond-orawls | files/wlst/utils.py | 1 | 7556 |
def create_boot_properties_file(directory_path, file_name, username, password):
server_dir = File(directory_path)
server_dir.mkdirs()
full_file_name = directory_path + '/' + file_name
file_new = open(full_file_name, 'w')
file_new.write('username=%s\n' % username)
file_new.write('password=%s\n' % password)
file_new.flush()
file_new.close()
os.system('chmod 600 ' + full_file_name)
def create_admin_startup_properties_file(directory_path, args):
adminserver_dir = File(directory_path)
adminserver_dir.mkdirs()
full_file_name = directory_path + '/startup.properties'
file_new = open(full_file_name, 'w')
args = args.replace(':', '\\:')
args = args.replace('=', '\\=')
file_new.write('Arguments=%s\n' % args)
file_new.flush()
file_new.close()
os.system('chmod 600 ' + full_file_name)
def create_machine(machine_type, name, address, nodemanager_secure_listener):
cd('/')
create(name, machine_type)
cd(machine_type + '/' + name)
create(name, 'NodeManager')
cd('NodeManager/' + name)
if nodemanager_secure_listener == True:
set('NMType', 'SSL')
else:
set('NMType', 'Plain')
set('ListenAddress', address)
def change_datasource(datasource, username, password, db_url):
print 'Change datasource ' + datasource
cd('/')
cd('/JDBCSystemResource/' + datasource + '/JdbcResource/' + datasource + '/JDBCDriverParams/NO_NAME_0')
set('URL', db_url)
set('PasswordEncrypted', password)
cd('Properties/NO_NAME_0/Property/user')
set('Value', username)
cd('/')
def change_datasource_driver(datasource, username, password, db_url):
print 'change_datasource_driver ' + datasource
cd('/')
cd('/JDBCSystemResource/' + datasource + '/JdbcResource/' + datasource + '/JDBCDriverParams/NO_NAME_0')
set('URL', db_url)
set('DriverName', 'oracle.jdbc.OracleDriver')
set('PasswordEncrypted', password)
cd('Properties/NO_NAME_0/Property/user')
set('Value', username)
cd('/')
def change_datasource_to_xa(datasource):
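    # Switch the datasource to the Oracle XA driver and enable two-phase-commit
    # global transactions.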
print 'change_datasource_to_xa ' + datasource
cd('/')
cd('/JDBCSystemResource/' + datasource + '/JdbcResource/' + datasource + '/JDBCDriverParams/NO_NAME_0')
set('DriverName', 'oracle.jdbc.xa.client.OracleXADataSource')
set('UseXADataSourceInterface', 'True')
cd('/JDBCSystemResource/' + datasource + '/JdbcResource/' + datasource + '/JDBCDataSourceParams/NO_NAME_0')
set('GlobalTransactionsProtocol', 'TwoPhaseCommit')
cd('/')
def create_opss_datasource(target, prefix, password, db_url):
cd('/')
create('opssDataSource', 'JDBCSystemResource')
cd('/JDBCSystemResource/opssDataSource')
set('Target', target)
cd('/JDBCSystemResource/opssDataSource/JdbcResource/opssDataSource')
cmo.setName('opssDataSource')
cd('/JDBCSystemResource/opssDataSource/JdbcResource/opssDataSource')
create('myJdbcDataSourceParams', 'JDBCDataSourceParams')
cd('JDBCDataSourceParams/NO_NAME_0')
set('JNDIName', 'jdbc/opssDataSource')
set('GlobalTransactionsProtocol', 'None')
cd('/JDBCSystemResource/opssDataSource/JdbcResource/opssDataSource')
create('myJdbcDriverParams', 'JDBCDriverParams')
cd('JDBCDriverParams/NO_NAME_0')
set('DriverName', 'oracle.jdbc.OracleDriver')
set('URL', db_url)
set('PasswordEncrypted', password)
set('UseXADataSourceInterface', 'false')
create('myProperties', 'Properties')
cd('Properties/NO_NAME_0')
create('user', 'Property')
cd('Property')
cd('user')
set('Value', prefix + '_OPSS')
cd('/JDBCSystemResource/opssDataSource/JdbcResource/opssDataSource')
create('myJdbcConnectionPoolParams', 'JDBCConnectionPoolParams')
cd('JDBCConnectionPoolParams/NO_NAME_0')
set('TestTableName', 'SQL SELECT 1 FROM DUAL')
def change_log(wls_type, name, log_folder):
if wls_type == 'server':
cd('/Server/' + name)
create(name, 'Log')
cd('/Server/' + name + '/Log/' + name)
else:
cd('/')
create('base_domain', 'Log')
cd('/Log/base_domain')
set('FileName', log_folder + '/' + name + '.log')
set('FileCount', 10)
set('FileMinSize', 5000)
set('RotationType', 'byTime')
set('FileTimeSpan', 24)
def change_ssl_with_port(server, jsse_enabled, ssl_listen_port):
cd('/Server/' + server)
create(server, 'SSL')
cd('SSL/' + server)
set('HostNameVerificationIgnored', 'True')
if ssl_listen_port:
set('Enabled', 'True')
set('ListenPort', int(ssl_listen_port))
else:
set('Enabled', 'False')
if jsse_enabled == True:
set('JSSEEnabled', 'True')
else:
set('JSSEEnabled', 'False')
def change_ssl(server, jsse_enabled):
change_ssl_with_port(server, jsse_enabled, None)
def change_server_arguments(server, java_arguments):
print 'change_server_arguments for server ' + server
cd('/Servers/' + server)
cd('ServerStart/' + server)
set('Arguments', java_arguments)
def change_default_server_attributes(server, machine, address, port, java_arguments, java_home):
print 'change_default_server_attributes for server ' + server
cd('/Servers/' + server)
if machine:
set('Machine', machine)
if address:
set('ListenAddress', address)
if port:
set('ListenPort', port)
create(server, 'ServerStart')
cd('ServerStart/' + server)
set('Arguments', java_arguments)
set('JavaVendor', 'Sun')
set('JavaHome', java_home)
def change_managed_server(server, machine, address, port, java_arguments, log_folder, java_home, jsse_enabled):
change_default_server_attributes(server, machine, address, port, java_arguments, java_home)
change_ssl(server, jsse_enabled)
change_log('server', server, log_folder)
def change_admin_server(adminserver, machine, address, port, java_arguments, java_home):
cd('/Servers/AdminServer')
set('Name', adminserver)
change_default_server_attributes(adminserver, machine, address, port, java_arguments, java_home)
def change_custom_identity_store(server, ks_filename, ks_passphrase, trust_ks_filename, trust_ks_passphrase, alias, alias_passphrase):
print 'set custom identity'
cd('/Server/' + server)
set('KeyStores', 'CustomIdentityAndCustomTrust')
set('CustomIdentityKeyStoreFileName', ks_filename)
set('CustomIdentityKeyStorePassPhraseEncrypted', ks_passphrase)
set('CustomTrustKeyStoreFileName', trust_ks_filename)
set('CustomTrustKeyStorePassPhraseEncrypted', trust_ks_passphrase)
cd('SSL/' + server)
set('ServerPrivateKeyAlias', alias)
set('ServerPrivateKeyPassPhraseEncrypted', alias_passphrase)
def set_domain_password(domain, password):
print 'set domain password...'
cd('/SecurityConfiguration/' + domain)
set('CredentialEncrypted', password)
def set_nodemanager_password(domain, password, username):
print 'set nodemanager password...'
cd('/SecurityConfiguration/' + domain)
set('NodeManagerUsername', username)
set('NodeManagerPasswordEncrypted', password)
def set_weblogic_password(username, password):
print 'set weblogic password...'
cd('/Security/base_domain/User/weblogic')
set('Name', username)
cmo.setPassword(password)
def set_cross_domain(crossdomain_enabled):
print 'set crossdomain'
cd('/')
create('base_domain', 'SecurityConfiguration')
cd('/SecurityConfiguration/base_domain')
set('CrossDomainSecurityEnabled', crossdomain_enabled)
| apache-2.0 | 6,416,536,408,274,387,000 | 32.286344 | 134 | 0.674034 | false | 3.569202 | false | false | false |
LukasRychtecky/django-chamber | chamber/migrations/fixtures.py | 1 | 1343 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from six.moves import cStringIO
from django.core.management import call_command
from django.core.serializers import base, python
class MigrationLoadFixture(object):
def __init__(self, migration_file, fixture_dir=None, fixture_filename=None, fixture_type='json'):
self.migration_file = migration_file
self.fixture_dir = fixture_dir or os.path.abspath(os.path.join(os.path.dirname(migration_file), 'fixtures'))
self.fixture_filename = fixture_filename or '%s.%s' % (
os.path.basename(migration_file).rsplit('.', 1)[0], fixture_type
)
def __call__(self, apps, schema_editor):
def _get_model(model_identifier):
"""
Helper to look up a model from an "app_label.model_name" string.
"""
try:
return apps.get_model(model_identifier)
except (LookupError, TypeError):
raise base.DeserializationError("Invalid model identifier: '%s'" % model_identifier)
python._get_model = _get_model
file = os.path.join(self.fixture_dir, self.fixture_filename)
if not os.path.isfile(file):
            raise IOError('File "%s" does not exist' % file)
call_command('loaddata', file, stdout=cStringIO())
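# Illustrative usage (not part of the original module); the import path and the
# app/migration names below are assumptions. In a data migration this callable
# is typically wired up through RunPython:
#
#     from django.db import migrations
#     from chamber.migrations.fixtures import MigrationLoadFixture
#
#     class Migration(migrations.Migration):
#         dependencies = [('myapp', '0001_initial')]
#         operations = [migrations.RunPython(MigrationLoadFixture(__file__))]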
| lgpl-3.0 | -2,368,341,324,001,750,500 | 37.371429 | 116 | 0.633656 | false | 3.938416 | false | false | false |
golsun/GPS | src/ck/def_cheminp.py | 1 | 2124 | from def_build_mech_dict import *
import os
import shutil
def rename_sp(sp_list):
sp_list_new = []
for s in sp_list:
sp_list_new.append(s.replace("(","-").replace(")","-").replace(",","-"))
return sp_list_new
def skeletal(detailed_folder, sk_folder, species_kept, notes=None):
if not os.path.exists(sk_folder):
os.makedirs(sk_folder)
if detailed_folder != sk_folder:
shutil.copyfile(os.path.join(detailed_folder,'therm.dat'), os.path.join(sk_folder,'therm.dat'))
trandat = os.path.join(detailed_folder,'tran.dat')
try:
ft = open(trandat,'r')
ft.close()
shutil.copyfile(trandat, os.path.join(sk_folder,'tran.dat'))
except IOError:
pass
sk_inp = os.path.join(sk_folder,'chem.inp')
mech = build_mech(detailed_folder,overwrite=False)
rxn_all = mech['reaction']
f = open(sk_inp,'w')
if notes is not None:
for note in notes:
f.write(note+'\n')
f.write('\n')
f.write('ELEMENTS\n')
for e in mech['element'].keys():
f.write(e + ' ')
f.write('\nEND\n\n')
f.write('SPECIES\n')
n = 0
for s in species_kept:
f.write(s + ' ')
n += 1
if n == 5:
f.write('\n')
n = 0
if n != 0:
f.write('\n')
f.write('END\n\n')
f.write('REACTIONS\n')
rxn_kept = []
for rxn in rxn_all:
if all(member in species_kept for member in rxn_all[rxn]['member'].keys()):
n_ln = 0
for info in rxn_all[rxn]['info']:
if n_ln > 0:
f.write(' ')
if '/' in info and \
('LOW' not in info.upper()) and ('TROE' not in info.upper()) \
and ('REV' not in info.upper()):
# this line describes three-body collision * efficiency *
# we should remove these not included in mech
ss = info.split('/')
info = ''
for i in range(len(ss)):
s = ss[i].strip()
if s in species_kept:
info += (ss[i] + '/' + ss[i+1] + '/')
f.write(info.strip() + '\n')
n_ln += 1
if n_ln > 1:
f.write('\n')
f.write('END\n\n')
f.close()
def test_sk():
detailed = 'test/gri30/'
sk_inp = 'test/gri30/reduced'
species_kept = ['H','HCO','CH2O','AR']
skeletal(detailed, sk_inp, species_kept)
if __name__ == '__main__':
test_sk() | mit | -1,599,475,815,948,921,600 | 19.833333 | 97 | 0.586158 | false | 2.510638 | false | false | false |
t1g0r/ramey | src/backend/libs/telepot/async/__init__.py | 1 | 18967 | import io
import json
import time
import asyncio
import aiohttp
import traceback
import re
from requests.utils import guess_filename
from concurrent.futures._base import CancelledError
import collections
import telepot
import telepot.async.helper
from ..exception import BadFlavor, BadHTTPResponse, TelegramError
def flavor_router(routing_table):
router = telepot.async.helper.Router(telepot.flavor, routing_table)
return router.route
class Bot(telepot._BotBase):
def __init__(self, token, loop=None):
super(Bot, self).__init__(token)
self._loop = loop if loop is not None else asyncio.get_event_loop()
self._router = telepot.async.helper.Router(telepot.flavor, {'normal': telepot.async.helper._delay_yell(self, 'on_chat_message'),
'inline_query': telepot.async.helper._delay_yell(self, 'on_inline_query'),
'chosen_inline_result': telepot.async.helper._delay_yell(self, 'on_chosen_inline_result')})
@property
def loop(self):
return self._loop
@asyncio.coroutine
def handle(self, msg):
yield from self._router.route(msg)
@asyncio.coroutine
def _parse(self, response):
try:
data = yield from response.json()
except ValueError:
text = yield from response.text()
raise BadHTTPResponse(response.status, text)
if data['ok']:
return data['result']
else:
description, error_code = data['description'], data['error_code']
# Look for specific error ...
for e in TelegramError.__subclasses__():
n = len(e.DESCRIPTION_PATTERNS)
if any(map(re.search, e.DESCRIPTION_PATTERNS, n*[description], n*[re.IGNORECASE])):
raise e(description, error_code)
# ... or raise generic error
raise TelegramError(description, error_code)
@asyncio.coroutine
def getMe(self):
r = yield from asyncio.wait_for(
aiohttp.post(self._methodurl('getMe')),
self._http_timeout
)
return (yield from self._parse(r))
@asyncio.coroutine
def sendMessage(self, chat_id, text, parse_mode=None, disable_web_page_preview=None, disable_notification=None, reply_to_message_id=None, reply_markup=None):
p = self._strip(locals())
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('sendMessage'),
data=self._rectify(p, allow_namedtuple=['reply_markup'])),
self._http_timeout
)
return (yield from self._parse(r))
@asyncio.coroutine
def forwardMessage(self, chat_id, from_chat_id, message_id, disable_notification=None):
p = self._strip(locals())
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('forwardMessage'),
data=self._rectify(p)),
self._http_timeout
)
return (yield from self._parse(r))
@asyncio.coroutine
def _sendFile(self, inputfile, filetype, params):
method = {'photo': 'sendPhoto',
'audio': 'sendAudio',
'document': 'sendDocument',
'sticker': 'sendSticker',
'video': 'sendVideo',
'voice': 'sendVoice',}[filetype]
if telepot._isstring(inputfile):
params[filetype] = inputfile
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl(method),
data=self._rectify(params, allow_namedtuple=['reply_markup'])),
self._http_timeout)
else:
if isinstance(inputfile, tuple):
if len(inputfile) == 2:
filename, fileobj = inputfile
else:
raise ValueError('Tuple must have exactly 2 elements: filename, fileobj')
else:
filename, fileobj = guess_filename(inputfile) or filetype, inputfile
mpwriter = aiohttp.MultipartWriter('form-data')
part = mpwriter.append(fileobj)
part.set_content_disposition('form-data', name=filetype, filename=filename)
r = yield from aiohttp.post(
self._methodurl(method),
params=self._rectify(params, allow_namedtuple=['reply_markup']),
data=mpwriter)
# `_http_timeout` is not used here because, for some reason, the larger the file,
# the longer it takes for the server to respond (after upload is finished). It is hard to say
# what value `_http_timeout` should be. In the future, maybe I should let user specify.
return (yield from self._parse(r))
@asyncio.coroutine
def sendPhoto(self, chat_id, photo, caption=None, disable_notification=None, reply_to_message_id=None, reply_markup=None):
p = self._strip(locals(), more=['photo'])
return (yield from self._sendFile(photo, 'photo', p))
@asyncio.coroutine
def sendAudio(self, chat_id, audio, duration=None, performer=None, title=None, disable_notification=None, reply_to_message_id=None, reply_markup=None):
p = self._strip(locals(), more=['audio'])
return (yield from self._sendFile(audio, 'audio', p))
@asyncio.coroutine
def sendDocument(self, chat_id, document, caption=None, disable_notification=None, reply_to_message_id=None, reply_markup=None):
p = self._strip(locals(), more=['document'])
return (yield from self._sendFile(document, 'document', p))
@asyncio.coroutine
def sendSticker(self, chat_id, sticker, disable_notification=None, reply_to_message_id=None, reply_markup=None):
p = self._strip(locals(), more=['sticker'])
return (yield from self._sendFile(sticker, 'sticker', p))
@asyncio.coroutine
def sendVideo(self, chat_id, video, duration=None, width=None, height=None, caption=None, disable_notification=None, reply_to_message_id=None, reply_markup=None):
p = self._strip(locals(), more=['video'])
return (yield from self._sendFile(video, 'video', p))
@asyncio.coroutine
def sendVoice(self, chat_id, voice, duration=None, disable_notification=None, reply_to_message_id=None, reply_markup=None):
p = self._strip(locals(), more=['voice'])
return (yield from self._sendFile(voice, 'voice', p))
@asyncio.coroutine
def sendLocation(self, chat_id, latitude, longitude, disable_notification=None, reply_to_message_id=None, reply_markup=None):
p = self._strip(locals())
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('sendLocation'),
data=self._rectify(p, allow_namedtuple=['reply_markup'])),
self._http_timeout
)
return (yield from self._parse(r))
@asyncio.coroutine
def sendChatAction(self, chat_id, action):
p = self._strip(locals())
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('sendChatAction'),
data=self._rectify(p)),
self._http_timeout
)
return (yield from self._parse(r))
@asyncio.coroutine
def getUserProfilePhotos(self, user_id, offset=None, limit=None):
p = self._strip(locals())
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('getUserProfilePhotos'),
data=self._rectify(p)),
self._http_timeout
)
return (yield from self._parse(r))
@asyncio.coroutine
def getFile(self, file_id):
p = self._strip(locals())
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('getFile'),
data=self._rectify(p)),
self._http_timeout
)
return (yield from self._parse(r))
@asyncio.coroutine
def getUpdates(self, offset=None, limit=None, timeout=None):
p = self._strip(locals())
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('getUpdates'),
data=self._rectify(p)),
self._http_timeout+(0 if timeout is None else timeout)
)
return (yield from self._parse(r))
@asyncio.coroutine
def setWebhook(self, url=None, certificate=None):
p = self._strip(locals(), more=['certificate'])
if certificate:
files = {'certificate': certificate}
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('setWebhook'),
params=self._rectify(p),
data=files),
self._http_timeout)
else:
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('setWebhook'),
data=self._rectify(p)),
self._http_timeout)
return (yield from self._parse(r))
@asyncio.coroutine
def downloadFile(self, file_id, dest):
f = yield from self.getFile(file_id)
# `file_path` is optional in File object
if 'file_path' not in f:
raise TelegramError('No file_path returned', None)
try:
r = yield from asyncio.wait_for(
aiohttp.get(self._fileurl(f['file_path'])),
self._http_timeout)
d = dest if isinstance(dest, io.IOBase) else open(dest, 'wb')
while 1:
chunk = yield from r.content.read(self._file_chunk_size)
if not chunk:
break
d.write(chunk)
d.flush()
finally:
if not isinstance(dest, io.IOBase) and 'd' in locals():
d.close()
if 'r' in locals():
r.close()
@asyncio.coroutine
def answerInlineQuery(self, inline_query_id, results, cache_time=None, is_personal=None, next_offset=None):
p = self._strip(locals())
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('answerInlineQuery'),
data=self._rectify(p, allow_namedtuple=['results'])),
timeout=self._http_timeout
)
return (yield from self._parse(r))
@asyncio.coroutine
def messageLoop(self, handler=None, source=None, ordered=True, maxhold=3):
if handler is None:
handler = self.handle
elif isinstance(handler, dict):
handler = flavor_router(handler)
def create_task_for(msg):
self.loop.create_task(handler(msg))
if asyncio.iscoroutinefunction(handler):
callback = create_task_for
else:
callback = handler
def handle(update):
try:
if 'message' in update:
callback(update['message'])
elif 'inline_query' in update:
callback(update['inline_query'])
elif 'chosen_inline_result' in update:
callback(update['chosen_inline_result'])
else:
# Do not swallow. Make sure developer knows.
raise BadFlavor(update)
except:
# Localize the error so message thread can keep going.
traceback.print_exc()
finally:
return update['update_id']
@asyncio.coroutine
def get_from_telegram_server():
offset = None # running offset
while 1:
try:
result = yield from self.getUpdates(offset=offset, timeout=20)
if len(result) > 0:
# No sort. Trust server to give messages in correct order.
# Update offset to max(update_id) + 1
offset = max([handle(update) for update in result]) + 1
except CancelledError:
raise
except:
traceback.print_exc()
yield from asyncio.sleep(0.1)
else:
yield from asyncio.sleep(0.1)
def dictify(data):
if type(data) is bytes:
return json.loads(data.decode('utf-8'))
elif type(data) is str:
return json.loads(data)
elif type(data) is dict:
return data
else:
raise ValueError()
@asyncio.coroutine
def get_from_queue_unordered(qu):
while 1:
try:
data = yield from qu.get()
update = dictify(data)
handle(update)
except:
traceback.print_exc()
@asyncio.coroutine
def get_from_queue(qu):
# Here is the re-ordering mechanism, ensuring in-order delivery of updates.
max_id = None # max update_id passed to callback
buffer = collections.deque() # keep those updates which skip some update_id
qwait = None # how long to wait for updates,
# because buffer's content has to be returned in time.
while 1:
try:
data = yield from asyncio.wait_for(qu.get(), qwait)
update = dictify(data)
if max_id is None:
# First message received, handle regardless.
max_id = handle(update)
elif update['update_id'] == max_id + 1:
# No update_id skipped, handle naturally.
max_id = handle(update)
                        # clear contiguous updates in buffer
if len(buffer) > 0:
buffer.popleft() # first element belongs to update just received, useless now.
while 1:
try:
if type(buffer[0]) is dict:
max_id = handle(buffer.popleft()) # updates that arrived earlier, handle them.
else:
                                        break # gap, no more contiguous updates
except IndexError:
break # buffer empty
elif update['update_id'] > max_id + 1:
                        # Update arrives prematurely, insert to buffer.
nbuf = len(buffer)
if update['update_id'] <= max_id + nbuf:
# buffer long enough, put update at position
buffer[update['update_id'] - max_id - 1] = update
else:
# buffer too short, lengthen it
expire = time.time() + maxhold
for a in range(nbuf, update['update_id']-max_id-1):
buffer.append(expire) # put expiry time in gaps
buffer.append(update)
else:
pass # discard
except asyncio.TimeoutError:
# debug message
# print('Timeout')
# some buffer contents have to be handled
# flush buffer until a non-expired time is encountered
while 1:
try:
if type(buffer[0]) is dict:
max_id = handle(buffer.popleft())
else:
expire = buffer[0]
if expire <= time.time():
max_id += 1
buffer.popleft()
else:
break # non-expired
except IndexError:
break # buffer empty
except:
traceback.print_exc()
finally:
try:
# don't wait longer than next expiry time
qwait = buffer[0] - time.time()
if qwait < 0:
qwait = 0
except IndexError:
# buffer empty, can wait forever
qwait = None
# debug message
# print ('Buffer:', str(buffer), ', To Wait:', qwait, ', Max ID:', max_id)
if source is None:
yield from get_from_telegram_server()
elif isinstance(source, asyncio.Queue):
if ordered:
yield from get_from_queue(source)
else:
yield from get_from_queue_unordered(source)
else:
raise ValueError('Invalid source')
class SpeakerBot(Bot):
def __init__(self, token, loop=None):
super(SpeakerBot, self).__init__(token, loop)
self._mic = telepot.async.helper.Microphone()
@property
def mic(self):
return self._mic
def create_listener(self):
q = asyncio.Queue()
self._mic.add(q)
ln = telepot.async.helper.Listener(self._mic, q)
return ln
class DelegatorBot(SpeakerBot):
def __init__(self, token, delegation_patterns, loop=None):
super(DelegatorBot, self).__init__(token, loop)
self._delegate_records = [p+({},) for p in delegation_patterns]
def handle(self, msg):
self._mic.send(msg)
for calculate_seed, make_coroutine_obj, dict in self._delegate_records:
id = calculate_seed(msg)
if id is None:
continue
elif isinstance(id, collections.Hashable):
if id not in dict or dict[id].done():
c = make_coroutine_obj((self, msg, id))
if not asyncio.iscoroutine(c):
raise RuntimeError('You must produce a coroutine *object* as delegate.')
dict[id] = self._loop.create_task(c)
else:
c = make_coroutine_obj((self, msg, id))
self._loop.create_task(c)
| gpl-3.0 | 9,221,394,431,815,405,000 | 38.597077 | 166 | 0.510782 | false | 4.634009 | false | false | false |
ibis-project/ibis | ibis/backends/impala/tests/test_parquet_ddl.py | 1 | 2838 | from posixpath import join as pjoin
import pytest
import ibis
from ibis.backends.impala.compat import HS2Error
from ibis.tests.util import assert_equal
pytestmark = pytest.mark.impala
def test_cleanup_tmp_table_on_gc(con, test_data_dir):
import gc
hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
table = con.parquet_file(hdfs_path)
name = table.op().name
table = None
gc.collect()
assert not con.exists_table(name)
def test_persist_parquet_file_with_name(con, test_data_dir, temp_table_db):
import gc
hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
tmp_db, name = temp_table_db
schema = ibis.schema(
[
('r_regionkey', 'int16'),
('r_name', 'string'),
('r_comment', 'string'),
]
)
con.parquet_file(
hdfs_path, schema=schema, name=name, database=tmp_db, persist=True
)
gc.collect()
# table still exists
con.table(name, database=tmp_db)
def test_query_parquet_file_with_schema(con, test_data_dir):
hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
schema = ibis.schema(
[
('r_regionkey', 'int16'),
('r_name', 'string'),
('r_comment', 'string'),
]
)
table = con.parquet_file(hdfs_path, schema=schema)
name = table.op().name
# table exists
con.table(name)
expr = table.r_name.value_counts()
expr.execute()
assert table.count().execute() == 5
def test_query_parquet_file_like_table(con, test_data_dir):
hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
ex_schema = ibis.schema(
[
('r_regionkey', 'int16'),
('r_name', 'string'),
('r_comment', 'string'),
]
)
table = con.parquet_file(hdfs_path, like_table='tpch_region')
assert_equal(table.schema(), ex_schema)
def test_query_parquet_infer_schema(con, test_data_dir):
hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
table = con.parquet_file(hdfs_path)
# NOTE: the actual schema should have an int16, but bc this is being
# inferred from a parquet file, which has no notion of int16, the
# inferred schema will have an int32 instead.
ex_schema = ibis.schema(
[
('r_regionkey', 'int32'),
('r_name', 'string'),
('r_comment', 'string'),
]
)
assert_equal(table.schema(), ex_schema)
def test_create_table_persist_fails_if_called_twice(
con, temp_table_db, test_data_dir
):
tmp_db, tname = temp_table_db
hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
con.parquet_file(hdfs_path, name=tname, persist=True, database=tmp_db)
with pytest.raises(HS2Error):
con.parquet_file(hdfs_path, name=tname, persist=True, database=tmp_db)
| apache-2.0 | 6,085,975,110,491,497,000 | 24.339286 | 78 | 0.610641 | false | 3.146341 | true | false | false |
speed-of-light/pyslider | summ.py | 1 | 9220 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
rks = [
"lib.exp.summ.storage",
"lib.exp.summ"]
reload_mods(rks)
from lib.exp.summ import Mary
mm = Mary()
import warnings
warnings.filterwarnings('ignore')
# <codecell>
#dc = mm.df_classify_perf(key="coverages_9")
#saf = mm.df_rec_ans()
if False:
sd = mm.get_storage()
sd.Classifier = [0, 1, 2]
sd.Preprocess = [170, 167, 178]
sd.Features = [1282, 1322, 1621]
sd.Slides = [40, 38, 42]
sd.Matches = [97, 93, 67]
mm.save("storage", sd)
sd = sd.sort(columns=["dsn"])
sd
# <codecell>
fig = plt.figure(figsize=(18, 5))
#fig.suptitle("Storage Comparisons of 3 dataset(MB)", fontsize=20, y=1.02)
kcrs = ["#335588", "#975533", "#448b35"]
crs = mpl.cm.GnBu(range(30,250, 30))
lbs = filter(lambda k: "dsn" not in k, sd.columns)
explode=(0, 0.1, 0, 0)
for ei, (si, sr) in enumerate(sd.iterrows(), 1):
ax = plt.subplot(1, 3, ei)
dt = sr[lbs]
dts = dt.sum()
exp = (dt.values / (1.*dts))*.2
pa, tx, txa = ax.pie(dt, explode=exp, labels=lbs, autopct='%1.1f%%', colors=crs, startangle=110)
# texts
[t.set_text(t.get_text() + "({})".format(v)) for t, v in zip(tx, dt.values)]
[t.set_text("") for t, v in zip(tx, dt.values) if v == 0]
[t.set_color(kcrs[si]) for t, v in zip(tx, dt.values)]
[t.set_size(18) for t in tx]
[t.set_size(18) for t in txa]
#[t.set_color(kcrs[si]) for t in txa]
# final
ax.set_title("{} ({})".format(sr.dsn, dts), fontsize=32, color=kcrs[si])
fig.savefig("data/fig/mary/storage.eps", transparent=1)
# <codecell>
rks = ["lib.texer.sum_ns_tab", "lib.texer.sum_sf_cov",
"lib.texer"]
reload_mods(rks)
from lib.texer import Texer
tex = Texer()
#print tex.sum_ns_roc(dc[:])
#print tex.sum_ns_cov(dc)
#print tex.sum_sf_cov(dc, cov=False)
# <codecell>
rks = [
"lib.plotter.ax_helper", "lib.plotter.plot_filer",
"lib.plotter.xframes.rec_plot",
"lib.plotter.summ.fig_plot",
"lib.plotter.summ"]
reload_mods(rks)
from lib.plotter.summ import MaryPlotter
mp = MaryPlotter()
# <codecell>
#fig = plt.figure(figsize=(10, 5))
#dc = mp.classfy_perf(fig=fig, ks=range(0, 2))
#fig = plt.figure(figsize=(15.6, 5))
#dc = mp.classfy_perf(fig=fig, ks=range(2, 5))
#fig = plt.figure(figsize=(18, 9))
#mp.rec_timeline(fig=fig, ds=[0], ks=[0,5])
# <codecell>
cc = mm.df_classify_perf(key="coverages_9")
# <codecell>
fig = plt.figure(figsize=(18, 5))
fig.suptitle("Switch Coverages VS Confidence", fontsize=18, y=.99)
for ci, rg in cc.groupby("dsn"):
ax = fig.add_subplot(1, 3, len(fig.axes)+1)
ax.patch.set_visible(False)
#print cg.sensitivity
cg = rg.sort(columns=["fswc"])
#print cg.key.values
ax.plot(cg.fswc, color='r', marker='x', label="Switch Cov.")
ax.plot(cg.tpc, color='g', marker='x', label="TP Confidence")
ax.plot(cg.tnc, color='b', marker='x', label="TN Confidence")
ax.set_title(ci.capitalize(), fontsize=16)
ax.set_ylim(0,1)
leg = ax.legend(loc=0, ncol=2)
leg.get_frame().set_alpha(.2)
tic, tap = -1, -1
ax.set_xlabel("Methods", fontsize=14)
ax.set_ylabel("Switch Coverage Rate", fontsize=14)
for ei, (gi, gd) in enumerate(cg.iterrows()):
if gd.key not in ['v_a_ans', 'v_atm_re_ans', 'v_atmri_ans']:
continue
#print gd
ax.annotate(gd.key[:-4],
xy=(ei, gd.fswc),
xytext=(ei, gd.fswc + gi*0.01*tic), # theta, radius
xycoords='data',
textcoords='data',
fontsize=14,
arrowprops=dict(width=.5, frac=.15, shrink=.9, headwidth=5),
ha='center', va='bottom',
clip_on=False, # clip to the axes bounding box
)
fig.savefig("data/fig/mary/soc.eps", transparent=1)
# <codecell>
from lib.exp.featx import Featx
fx = Featx("univ_07", "coates")
from lib.exp.pairing import PairFeats
pf = PairFeats(fx)
df = fx.load("rtlog")
df = df.drop_duplicates(cols=["key"])
print df.time.sum()
pf.set_matcher()
pf._update_klass_var()
df = pf.load("rs_d80_ra1000_rm10_iw0_ot0_NoHomo_Area")
print df.time.sum()
# <codecell>
td = pd.DataFrame(columns=sd.columns)
for sc in sd.columns:
td[sc] = [0,0,0]
td.Features = [154034.75189208984, 40080.8579922, 190572.567463]
td["Matches"] = [1496278.0277729034, 343546.187878, 1121270.24841]
td["dsn"] = sd.dsn
# <codecell>
def to_mins(ms):
tsec = ms / 1000
sec = tsec % 60
tmins= int(tsec / 60)
mins = tmins % 60
hr = int(tmins / 60)
return "{:02d}:{:02d}:{:02.0f}".format(hr, mins, sec)
def time_pie(fig, df, cols=["Features", "Matches"], fn="time"):
kcrs = ["#335588", "#975533", "#448b35"]
crs = mpl.cm.Pastel2(range(120,250, 50))
explode=(0, 0.1, 0, 0)
for ei, (si, sr) in enumerate(df.iterrows(), 1):
ax = plt.subplot(1, 3, ei)
dt = sr[cols]
dts = dt.sum()
exp = (dt.values / (1.*dts))*.2
pa, tx, txa = ax.pie(dt, explode=exp, labels=cols, autopct='%1.1f%%', colors=crs)
# texts
[a.set_text(a.get_text() + "\n" + t.get_text() + "({})".format(to_mins(v))) for a, t, v in zip(txa, tx, dt.values)]
[t.set_text("") for a, t, v in zip(txa, tx, dt.values)]
[t.set_color(kcrs[si]) for t, v in zip(txa, dt.values)]
[t.set_size(18) for t in txa]
# final
ax.set_title("{} ({})".format(sr.dsn, to_mins(dts)), fontsize=32, color=kcrs[si])
fig.savefig("data/fig/mary/{}.eps".format(fn), transparent=1)
fig = plt.figure(figsize=(18, 5))
time_pie(fig, td)
# <codecell>
td = td.sort(columns=["dsn"])
# <codecell>
ftd = td[:]
ftd["prer"] = [0.0087, 0.0089, 0.0050]
# <codecell>
ftd["All_Features"] = ftd.Features/ftd.prer
ftd["All_Matching"] = ftd.Matches/ftd.prer
#print ftd
fig = plt.figure(figsize=(18, 5))
time_pie(fig, ftd, cols=["All_Features", "All_Matching"], fn="exp_time")
# <codecell>
from lib.exp.summary import Summary
su = Summary()
sud = su.load_summary()
# <codecell>
fmts = dict(Time=to_mins)
sud = sud[sud.n_name.isin(["chaves", "coates", "rozenblit"])]
fcc = ["n_name", "n_frames", "n_slides", "n_time", "v_width", "v_height"]
sdd = sud[fcc]
sdd.columns = [fc[2:].capitalize() for fc in fcc]
sdd.Time = sdd.Time * 1000
sdd["Difficulty"] = ["Mixed background, Animated Slides", "Simpler background, Plain slides", "Lots similar, image slides"]
# <codecell>
print sdd.to_latex(index=0, formatters=fmts)
# <codecell>
cpf = pd.DataFrame(columns=["Critirions", "Proposed", "Fan_11"])
cpf = cpf.append([dict(Critirions="Time($GLOB+LOC$)", Proposed="00:16:27", Fan_11="00:38:40")])
cpf = cpf.append([dict(Critirions="Global Time($GLOB^1$)", Proposed="39:59:14", Fan_11="09:36:24")])
cpf = cpf.append([dict(Critirions="Avg. Accuracy($1-FER^2$)", Proposed="52%", Fan_11="98.2%")])
cpf = cpf.append([dict(Critirions="Best Switch Coverages($1-SER^3$)", Proposed="96.7%", Fan_11="94.4%~98.3%")])
cpf = cpf.append([dict(Critirions="Worst Switch Coverages($1-SER^3$)", Proposed="96.7%", Fan_11="94.4%~98.3%")])
print cpf.to_latex(index=0)
# <codecell>
print to_mins(ftd.Features.mean())
print to_mins(ftd.Matches.mean())
print to_mins(ftd.All_Features.mean())
print to_mins(ftd.All_Matching.mean())
# <codecell>
def bold_negative(v):
if v == -1:
return "STextbfBFSTextcolorBFemphasisBRBF-1BRBR"
else:
return "{}".format(v)
def cp(v):
if v < 1.00:
return "STextitBFSTextcolorBFemphasisBRBF{:4.1f}%BRBR".format(v*100)
else:
return "{:4.1f}%".format(v*100)
#seeds = saf.iloc[randint(0, high=len(saf), size=10)]
fsc = ["fid", "gnd", "area", "top50", "mean", "rmean", "invr"]
fsc = ["fid", "gnd", "v_a_ans", "v_at_ans", "v_atm_ans", "v_atmri_ans", "v_atm_re_ans"]
fsc = ["fid", "gnd", "v_a_conf", "v_at_conf", "v_atm_conf", "v_atmri_conf", "v_atm_re_conf"]
fsd = seeds[fsc].sort(columns=["fid"])
fsd.columns = [f.capitalize() for f in fsd.columns]
bn = bold_negative
#V_at_conf=bn, V_atm_ans=bn, V_atmri_ans=bn)).
print fsd.to_latex(index=0, formatters=dict(
Gnd=bn, V_a_conf=cp, V_at_conf=cp, V_atm_conf=cp, V_atmri_conf=cp, V_atm_re_conf=cp), float_format="{:.2f}".format). \
replace("ST", "\\t").replace("BF", "{").replace("BR", "}").replace("V\\_", "").\
replace("\_ans", "")
# <codecell>
seeds
# <codecell>
from lib.exp.tools.video import Video
from lib.exp.tools.slider import Slider
from lib.exp.featx import Featx
dn = ("univ_07", "coates")
vid = Video(*dn)
slr = Slider(*dn)
fx = Featx(*dn)
fid, sid = 18050, 16
sp = fx.load("s_{:03d}_kps".format(sid))
fp = fx.load("f_{}_kps".format(fid))
vimg = vid.get_frames(ids=[fid]).next()["img"]
simg = slr.get_slides(ids=[sid], resize=(vimg.shape[1], vimg.shape[0])).next()["img"]
# <codecell>
def draw_kps(ax, img, kps, show=1, ty="Frame", iid=18050):
ax.imshow(img[:, :, [2, 1, 0]])
if show:
ax.scatter(kps.x, kps.y, marker="x", color="#55Fe36")
ax.scatter(kps.x, kps.y, marker=".", facecolors="none", edgecolors="#EE5869", s=kps.size*50)
ax.set_xlim(0, img.shape[1])
ax.set_ylim(img.shape[0], 0)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title("{}[{}]".format(ty, iid), fontsize=16)
fig = plt.figure(figsize=(18, 6))
ax = fig.add_subplot(122)
draw_kps(ax, vimg, fp, show=1, iid=fid)
ax = fig.add_subplot(121)
draw_kps(ax, simg, sp, show=1, ty="Slide", iid=sid)
fig.savefig("data/fig/mary/sift_after.eps", transparent=1)
# <codecell>
fp.columns
# <codecell>
| agpl-3.0 | 5,366,314,327,825,016,000 | 28.456869 | 123 | 0.62039 | false | 2.375064 | false | false | false |
rogerthat-platform/rogerthat-backend | src/mcfw/exceptions.py | 1 | 2422 | # -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import httplib
class HttpException(Exception):
http_code = 0
def __init__(self, error=None, data=None, **kwargs):
self.data = data or {}
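        # Fall back to the standard HTTP reason phrase of the status code when
        # no explicit error message is given.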
if not error and self.http_code in httplib.responses:
error = httplib.responses[self.http_code]
self.error = error
super(HttpException, self).__init__(self, error, **kwargs)
class HttpBadRequestException(HttpException):
http_code = httplib.BAD_REQUEST
def __init__(self, *args, **kwargs):
super(HttpBadRequestException, self).__init__(*args, **kwargs)
class HttpUnAuthorizedException(HttpException):
http_code = httplib.UNAUTHORIZED
def __init__(self, *args, **kwargs):
super(HttpUnAuthorizedException, self).__init__(*args, **kwargs)
class HttpForbiddenException(HttpException):
http_code = httplib.FORBIDDEN
def __init__(self, *args, **kwargs):
super(HttpForbiddenException, self).__init__(*args, **kwargs)
class HttpNotFoundException(HttpException):
http_code = httplib.NOT_FOUND
def __init__(self, *args, **kwargs):
super(HttpNotFoundException, self).__init__(*args, **kwargs)
class HttpConflictException(HttpException):
http_code = httplib.CONFLICT
def __init__(self, *args, **kwargs):
super(HttpConflictException, self).__init__(*args, **kwargs)
class HttpUnprocessableEntityException(HttpException):
http_code = httplib.UNPROCESSABLE_ENTITY
def __init__(self, *args, **kwargs):
super(HttpUnprocessableEntityException, self).__init__(*args, **kwargs)
class HttpInternalServerErrorException(HttpException):
http_code = httplib.INTERNAL_SERVER_ERROR
def __init__(self, *args, **kwargs):
super(HttpInternalServerErrorException, self).__init__(*args, **kwargs)
| apache-2.0 | 3,057,799,384,206,349,300 | 30.051282 | 79 | 0.691164 | false | 3.957516 | false | false | false |
ingadhoc/odoo-infrastructure | infrastructure/models/mailserver.py | 1 | 1038 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields
class mailserver(models.Model):
""""""
_name = 'infrastructure.mailserver'
_inherit = 'ir.mail_server'
partner_id = fields.Many2one(
'res.partner',
'Partner',
        help='If partner is set, then this mailserver will only be available '
        'for the databases of this partner'
)
external_id = fields.Char(
'External ID',
required=True,
default='infra_stmp_server',
help='External ID used to identify record on record update. It is '
        'suggested that all mail servers have the same external id to make '
        'them replaceable')
database_ids = fields.One2many(
'infrastructure.database',
'smtp_server_id',
'Databases',
)
| agpl-3.0 | 4,729,004,514,452,739,000 | 31.4375 | 79 | 0.526012 | false | 4.654709 | false | false | false |
noskill/virt-manager | tests/storage.py | 1 | 8815 | # Copyright (C) 2013 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
import logging
import os
import unittest
from virtinst import StoragePool, StorageVolume
from tests import utils
# pylint: disable=protected-access
# Access to protected member, needed to unittest stuff
basepath = os.path.join(os.getcwd(), "tests", "storage-xml")
def generate_uuid_from_string(msg):
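    # Derive a deterministic, UUID-shaped digit string (8-4-4-4-12 groups) from
    # the name so that generated pool XML stays stable across test runs.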
res = msg.split("-", 1)
if len(res) > 1:
# Split off common prefix
msg = res[1]
numstr = ""
for c in msg:
numstr += str(ord(c))
numstr *= 32
return "-".join([numstr[0:8], numstr[8:12], numstr[12:16], numstr[16:20],
numstr[20:32]])
def _findFreePoolName(conn, namebase):
i = 0
while True:
poolname = namebase + "-%d" % i
try:
conn.storagePoolLookupByName(poolname)
i += 1
except:
return poolname
def createPool(conn, ptype, poolname=None, fmt=None, target_path=None,
source_path=None, source_name=None, uuid=None, iqn=None):
if poolname is None:
poolname = _findFreePoolName(conn, str(ptype) + "-pool")
if uuid is None:
uuid = generate_uuid_from_string(poolname)
pool_inst = StoragePool(conn)
pool_inst.name = poolname
pool_inst.type = ptype
pool_inst.uuid = uuid
if pool_inst.supports_property("host"):
pool_inst.host = "some.random.hostname"
if pool_inst.supports_property("source_path"):
pool_inst.source_path = source_path or "/some/source/path"
if pool_inst.supports_property("target_path"):
pool_inst.target_path = target_path or "/some/target/path"
if fmt and pool_inst.supports_property("format"):
pool_inst.format = fmt
if source_name and pool_inst.supports_property("source_name"):
pool_inst.source_name = source_name
if iqn and pool_inst.supports_property("iqn"):
pool_inst.iqn = iqn
pool_inst.validate()
return poolCompare(pool_inst)
def poolCompare(pool_inst):
filename = os.path.join(basepath, pool_inst.name + ".xml")
out_expect = pool_inst.get_xml_config()
if not os.path.exists(filename):
open(filename, "w").write(out_expect)
utils.diff_compare(out_expect, filename)
return pool_inst.install(build=True, meter=None, create=True)
def createVol(conn, poolobj, volname=None, input_vol=None, clone_vol=None):
if volname is None:
volname = poolobj.name() + "-vol"
# Format here depends on libvirt-1.2.0 and later
if clone_vol and conn.local_libvirt_version() < 1002000:
logging.debug("skip clone compare")
return
alloc = 5 * 1024 * 1024 * 1024
cap = 10 * 1024 * 1024 * 1024
vol_inst = StorageVolume(conn)
vol_inst.pool = poolobj
vol_inst.name = volname
vol_inst.capacity = cap
vol_inst.allocation = alloc
vol_inst.permissions.mode = "0700"
vol_inst.permissions.owner = "10736"
vol_inst.permissions.group = "10736"
if input_vol:
vol_inst.input_vol = input_vol
vol_inst.sync_input_vol()
elif clone_vol:
vol_inst = StorageVolume(conn, parsexml=clone_vol.XMLDesc(0))
vol_inst.input_vol = clone_vol
vol_inst.sync_input_vol()
vol_inst.name = volname
vol_inst.validate()
filename = os.path.join(basepath, vol_inst.name + ".xml")
utils.diff_compare(vol_inst.get_xml_config(), filename)
return vol_inst.install(meter=False)
class TestStorage(unittest.TestCase):
def setUp(self):
self.conn = utils.open_testdefault()
def testDirPool(self):
poolobj = createPool(self.conn,
StoragePool.TYPE_DIR, "pool-dir")
invol = createVol(self.conn, poolobj)
createVol(self.conn, poolobj,
volname=invol.name() + "input", input_vol=invol)
createVol(self.conn, poolobj,
volname=invol.name() + "clone", clone_vol=invol)
def testFSPool(self):
poolobj = createPool(self.conn,
StoragePool.TYPE_FS, "pool-fs")
invol = createVol(self.conn, poolobj)
createVol(self.conn, poolobj,
volname=invol.name() + "input", input_vol=invol)
createVol(self.conn, poolobj,
volname=invol.name() + "clone", clone_vol=invol)
def testNetFSPool(self):
poolobj = createPool(self.conn,
StoragePool.TYPE_NETFS, "pool-netfs")
invol = createVol(self.conn, poolobj)
createVol(self.conn, poolobj,
volname=invol.name() + "input", input_vol=invol)
createVol(self.conn, poolobj,
volname=invol.name() + "clone", clone_vol=invol)
def testLVPool(self):
poolobj = createPool(self.conn,
StoragePool.TYPE_LOGICAL,
"pool-logical",
target_path="/dev/pool-logical")
invol = createVol(self.conn, poolobj)
createVol(self.conn, poolobj,
volname=invol.name() + "input", input_vol=invol)
createVol(self.conn,
poolobj, volname=invol.name() + "clone", clone_vol=invol)
# Test parsing source name for target path
createPool(self.conn, StoragePool.TYPE_LOGICAL,
"pool-logical-target-srcname",
target_path="/dev/vgfoobar")
# Test with source name
createPool(self.conn,
StoragePool.TYPE_LOGICAL, "pool-logical-srcname",
source_name="vgname")
# Test creating with many devices
# XXX: Need to wire this up
# createPool(self.conn,
# StoragePool.TYPE_LOGICAL, "pool-logical-manydev",
# source_path=["/tmp/path1", "/tmp/path2", "/tmp/path3"],
# target_path=None)
def testDiskPool(self):
poolobj = createPool(self.conn,
StoragePool.TYPE_DISK,
"pool-disk", fmt="dos")
invol = createVol(self.conn, poolobj)
createVol(self.conn, poolobj,
volname=invol.name() + "input", input_vol=invol)
createVol(self.conn, poolobj,
volname=invol.name() + "clone", clone_vol=invol)
def testISCSIPool(self):
createPool(self.conn,
StoragePool.TYPE_ISCSI, "pool-iscsi",
iqn="foo.bar.baz.iqn")
def testSCSIPool(self):
createPool(self.conn, StoragePool.TYPE_SCSI, "pool-scsi")
def testMpathPool(self):
createPool(self.conn, StoragePool.TYPE_MPATH, "pool-mpath")
def testGlusterPool(self):
if not self.conn.check_support(self.conn.SUPPORT_CONN_POOL_GLUSTERFS):
raise unittest.SkipTest("Gluster pools not supported with this "
"libvirt version.")
createPool(self.conn, StoragePool.TYPE_GLUSTER, "pool-gluster")
def _enumerateCompare(self, name, pool_list):
for pool in pool_list:
pool.name = name + str(pool_list.index(pool))
pool.uuid = generate_uuid_from_string(pool.name)
poolCompare(pool)
def testEnumerateLogical(self):
name = "pool-logical-list"
lst = StoragePool.pool_list_from_sources(self.conn,
StoragePool.TYPE_LOGICAL)
self._enumerateCompare(name, lst)
def testEnumerateNetFS(self):
name = "pool-netfs-list"
host = "example.com"
lst = StoragePool.pool_list_from_sources(self.conn,
StoragePool.TYPE_NETFS,
host=host)
self._enumerateCompare(name, lst)
def testEnumerateiSCSI(self):
host = "example.com"
lst = StoragePool.pool_list_from_sources(self.conn,
StoragePool.TYPE_ISCSI,
host=host)
self.assertTrue(len(lst) == 0)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | -4,441,029,140,978,103,300 | 33.980159 | 78 | 0.59705 | false | 3.683661 | true | false | false |
jilljenn/tryalgo | tryalgo/subsetsum_divide.py | 1 | 2217 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""\
Subsetsum by splitting
christoph dürr et jill-jênn vie - 2014-2019
"""
# snip{
def part_sum(x_table, i=0):
"""All subsetsums from x_table[i:]
:param x_table: table of values
:param int i: index_table defining suffix_table of x_table to be considered
:iterates: over all values, in arbitrary order
:complexity: :math:`O(2^{len(x_table)-i})`
"""
if i == len(x_table):
yield 0
else:
for s_idx in part_sum(x_table, i + 1):
yield s_idx
yield s_idx + x_table[i]
def subset_sum(x_table, r_target):
"""Subsetsum by splitting
:param x_table: table of values
:param r_target: target value
:returns bool: if there is a subsequence of x_table with total sum r_target
    :complexity: :math:`O(n \\cdot 2^{n/2})`
"""
k = len(x_table) // 2 # divide input
y_value = list(part_sum(x_table[:k]))
z_value = [r_target - v for v in part_sum(x_table[k:])]
y_value.sort() # test of intersection between y_value and z_value
z_value.sort()
i = 0
j = 0
while i < len(y_value) and j < len(z_value):
if y_value[i] == z_value[j]:
return True
if y_value[i] < z_value[j]: # increment index of smallest element
i += 1
else:
j += 1
return False
# snip}
# snip{ subset_sum2
def part_sum2(x_table):
"""All subsetsums from a list x
:param x_table: list of values
:complexity: :math:`O(2^{len(x)})`
"""
answer = set([0]) # 0 = value of empty set
for xi in x_table:
answer |= set(value + xi for value in answer)
return answer
def subset_sum2(x_table, r_target):
"""Subsetsum by splitting
:param x_table: table of values
:param r_target: target value
:returns bool: if there is a subsequence of x_table with total sum r_target
    :complexity: :math:`O(2^{\\lceil n/2 \\rceil})`
"""
k = len(x_table) // 2 # divide input
y_set = part_sum2(x_table[:k])
z_set = set(r_target - value for value in part_sum2(x_table[k:]))
return len(y_set & z_set) > 0 # test intersection
# snip}
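# Minimal usage sketch (not part of the original module); both variants agree.
if __name__ == "__main__":
    VALUES = [1, 8, 7, 3, 2]
    assert subset_sum(VALUES, 11) and subset_sum2(VALUES, 11)    # e.g. 8 + 3
    assert not subset_sum(VALUES, 100) and not subset_sum2(VALUES, 100)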
| mit | -4,340,037,461,299,297,300 | 27.766234 | 79 | 0.575169 | false | 3.089261 | false | false | false |
plum-umd/java-sketch | jskparser/ast/body/variabledeclarator.py | 1 | 1508 | #!/usr/bin/env python
from . import _import
from ..node import Node
class VariableDeclarator(Node):
def __init__(self, kwargs={}):
if kwargs.get(u'id', ''):
super(VariableDeclarator, self).__init__(kwargs)
locs = _import()
# VariableDeclaratorId
self._id = locs[u'VariableDeclaratorId'](kwargs.get(u'id', ''))
# Type type
typ = kwargs.get(u'type')
self._typ = locs[typ[u'@t']](typ) if typ else None
# Expression
i = kwargs.get('init', None)
self._init = locs[i[u'@t']](i) if i else None
# if self._init and self.parentNode and not self._typ:
# self._init.typee = self.parentNode.typee
self.add_as_parent([self.idd, self.init])
@property
def idd(self): return self._id
@idd.setter
def idd(self, v): self._id = v
@property
def name(self): return self._id.name
@name.setter
def name(self, v): self._id.name = v
@property
def init(self): return self._init
@init.setter
def init(self, v): self._init = v
@property
def lbl(self): return (self.name, self.ati)
@lbl.setter
def lbl(self, v): self._lbl = v
@property
def typee(self): return self._typ if self._typ else self.parentNode.typee
@typee.setter
def typee(self, v): self._typ = v
def gen(self): return set([self.lbl]) if self.init else set([])
def __str__(self): return str(self.idd)
| mit | 4,094,312,163,630,533,000 | 25.928571 | 77 | 0.56565 | false | 3.466667 | false | false | false |
lithiumoxide/scical | astronomy.py | 1 | 1465 | # astronomy.py 12.10.3
from math import *
G = 6.673e-11
c = 2.998e8
H = 80 # km/s/Mpc
v = 0
relg = 1/(sqrt(1-((v/c)**2)))
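# NOTE: relg is evaluated once at import time from the module-level v (0 here),
# so the specrel* helpers below use a Lorentz factor of 1 unless v and relg are
# reassigned first.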
def gforce(m1, m2, r):
''' (int, int, int) -> int
Calculates gravitational force between masses m1 and m2 (kg) at a separation of r (m).
'''
global G
return str((G*m1*m2)/(r**2)) + ' N'
def magabs(mapp, d):
    ''' (number, number) -> float
    Return absolute magnitude given apparent magnitude and distance (parsecs), mapp and d.
    '''
    return str(5 + mapp - (5*log(d, 10)))
def magapp(mabs, d):
    ''' (number, number) -> float
    Return apparent magnitude given absolute magnitude and distance (parsecs), mabs and d.
    '''
    return str((5*log(d, 10) - 5) + mabs)
def luminosity(flux, d):
    ''' (number, number) -> float
    Return luminosity of a star at a given distance d, considering its flux.
    '''
    return str(4*pi*(d**2)*flux) + ' W'
def schwradius(m):
''' (number) -> float
    Return the Schwarzschild radius of an object of mass m
'''
global G
global c
return str((2*G*m)/(c**2)) + ' m'
def hubblevel(d):
    global H
    return str(H*d) + ' km/s'
def hubbledis(v):
    global H
    return str(v/H) + ' Mpc'
def specrelt(t):
''' (number) -> float
Return relativistic time when given stationary time.
'''
global relg
return str(relg*t) + ' s'
def specrelm(m):
''' Return relativistic mass. '''
global relg
return str(relg*m) + ' kg'
def specrelx(x):
''' Return relativistic length.'''
global relg
return str(x/relg) + ' m' | gpl-3.0 | 8,261,305,374,650,977,000 | 20.246377 | 87 | 0.632765 | false | 2.491497 | false | false | false |
bcicen/multicrane | multicrane/crane.py | 1 | 1817 | #!/usr/bin/env python
import os, yaml, urllib2, logging, termcolor
from sh import crane
from util import randomcolor
log = logging.getLogger()
class CraneConfig(object):
def __init__(self, cranefile):
"""
CraneConfig object
"""
self.txtcolor = randomcolor()
self.cranefile = cranefile
self.docker_host = self._gethost()
self.docker_host_short = self.docker_host.strip('tcp://').split(':')[0]
self.env = os.environ.copy()
self.env['DOCKER_HOST'] = self.docker_host
def is_running(self):
try:
os.kill(self.pid, 0)
except OSError:
return False
return True
def __getattr__(self, name):
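        # Any attribute lookup (e.g. a subcommand such as ``lift``) is proxied to
        # the crane CLI and run in the background against this config's DOCKER_HOST.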
p = crane(name, '-c', self.cranefile,
_env=self.env,
_out=self._process_out,
_err=self._process_out,
_out_bufsize=1,
_bg=True)
self.pid = p.pid
log.info('running %s' % p.cmd)
log.debug('call args: %s' % p.call_args)
def _process_out(self,line):
termcolor.cprint(self.docker_host_short + ": " + line.strip('\n'),
self.txtcolor)
def _gethost(self):
cf = yaml.load(open(self.cranefile, 'r'))
#simple validation before returning the docker_host
if not cf.has_key('docker_host'):
raise Exception('docker_host section not found in cranefile %s' %
self.cranefile)
r = urllib2.Request(cf['docker_host'].replace('tcp', 'http') + "/version")
try:
urllib2.urlopen(r).read()
except Exception, e:
log.fatal('unable to reach docker host %s' %
cf['docker_host'])
raise Exception(e)
return cf['docker_host']
| mit | -8,861,030,407,317,800,000 | 30.877193 | 82 | 0.5377 | false | 3.754132 | false | false | false |
leshchevds/ganeti | test/py/testutils_ssh.py | 1 | 28653 | #!/usr/bin/python
#
# Copyright (C) 2010, 2013, 2015 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Helper class to test ssh-related code."""
from ganeti import constants
from ganeti import pathutils
from ganeti import errors
from collections import namedtuple
class FakeSshFileManager(object):
"""Class which 'fakes' the lowest layer of SSH key manipulation.
There are various operations which touch the nodes' SSH keys and their
respective key files (authorized_keys and ganeti_pub_keys). Those are
tedious to test as file operations have to be mocked on different levels
(direct access to the authorized_keys and ganeti_pub_keys) of the master
node, indirect access to those files of the non-master nodes (via the
ssh_update tool). In order to make unit tests of those operations more
  readable and manageable, we introduce this class, which mocks all
direct and indirect access to SSH key files on all nodes. This way,
the state of this FakeSshFileManager represents the state of a cluster's
  nodes' SSH key files in a concise and easily accessible way.
"""
def __init__(self):
# Dictionary mapping node name to node properties. The properties
# are a named tuple of (node_uuid, ssh_key, is_potential_master_candidate,
# is_master_candidate, is_master).
self._all_node_data = {}
# Dictionary emulating the authorized keys files of all nodes. The
# indices of the dictionary are the node names, the values are sets
# of keys (strings).
self._authorized_keys = {}
# Dictionary emulating the public keys file of all nodes. The indices
# of the dictionary are the node names where the public key file is
# 'located' (if it wasn't faked). The values of the dictionary are
    # dictionaries themselves. Each of those dictionaries is indexed by the
# node UUIDs mapping to a list of public keys.
self._public_keys = {} # dict of dicts
# Node name of the master node
self._master_node_name = None
# Dictionary mapping nodes by name to number of retries where 'RunCommand'
# succeeds. For example if set to '3', RunCommand will fail two times when
# called for this node before it succeeds in the 3rd retry.
self._max_retries = {}
# Dictionary mapping nodes by name to number of retries which
# 'RunCommand' has already carried out.
self._retries = {}
self._AssertTypePublicKeys()
self._AssertTypeAuthorizedKeys()
_NodeInfo = namedtuple(
"NodeInfo",
["uuid",
"key",
"is_potential_master_candidate",
"is_master_candidate",
"is_master"])
def _SetMasterNodeName(self):
self._master_node_name = [name for name, node_info
in self._all_node_data.items()
if node_info.is_master][0]
def GetMasterNodeName(self):
return self._master_node_name
def _CreateNodeDict(self, num_nodes, num_pot_mcs, num_mcs):
"""Creates a dictionary of all nodes and their properties."""
self._all_node_data = {}
for i in range(num_nodes):
name = "node_name_%i" % i
uuid = "node_uuid_%i" % i
key = "key%s" % i
self._public_keys[name] = {}
self._authorized_keys[name] = set()
pot_mc = i < num_pot_mcs
mc = i < num_mcs
master = i == num_mcs / 2
self._all_node_data[name] = self._NodeInfo(uuid, key, pot_mc, mc, master)
self._AssertTypePublicKeys()
self._AssertTypeAuthorizedKeys()
def _FillPublicKeyOfOneNode(self, receiving_node_name):
node_info = self._all_node_data[receiving_node_name]
# Nodes which are not potential master candidates receive no keys
if not node_info.is_potential_master_candidate:
return
for node_info in self._all_node_data.values():
if node_info.is_potential_master_candidate:
self._public_keys[receiving_node_name][node_info.uuid] = [node_info.key]
def _FillAuthorizedKeyOfOneNode(self, receiving_node_name):
for node_name, node_info in self._all_node_data.items():
if node_info.is_master_candidate \
or node_name == receiving_node_name:
self._authorized_keys[receiving_node_name].add(node_info.key)
def InitAllNodes(self, num_nodes, num_pot_mcs, num_mcs):
"""Initializes the entire state of the cluster wrt SSH keys.
@type num_nodes: int
@param num_nodes: number of nodes in the cluster
@type num_pot_mcs: int
@param num_pot_mcs: number of potential master candidates in the cluster
    @type num_mcs: int
@param num_mcs: number of master candidates in the cluster.
"""
self._public_keys = {}
self._authorized_keys = {}
self._CreateNodeDict(num_nodes, num_pot_mcs, num_mcs)
for node in self._all_node_data.keys():
self._FillPublicKeyOfOneNode(node)
self._FillAuthorizedKeyOfOneNode(node)
self._SetMasterNodeName()
self._AssertTypePublicKeys()
self._AssertTypeAuthorizedKeys()
def SetMaxRetries(self, node_name, retries):
"""Set the number of unsuccessful retries of 'RunCommand' per node.
@type node_name: string
@param node_name: name of the node
@type retries: integer
@param retries: number of unsuccessful retries
"""
self._max_retries[node_name] = retries
def GetSshPortMap(self, port):
"""Creates a SSH port map with all nodes mapped to the given port.
@type port: int
@param port: SSH port number for all nodes
"""
port_map = {}
for node in self._all_node_data.keys():
port_map[node] = port
return port_map
def GetAllNodeNames(self):
"""Returns all node names of the cluster.
@rtype: list of str
@returns: list of all node names
"""
return self._all_node_data.keys()
def GetAllNodeUuids(self):
"""Returns all node UUIDs of the cluster.
@rtype: list of str
@returns: list of all node UUIDs
"""
return [node.uuid for node in self._all_node_data.values()]
def GetAllPotentialMasterCandidateNodeNames(self):
return [name for name, node_info
in self._all_node_data.items()
if node_info.is_potential_master_candidate]
def GetAllMasterCandidateUuids(self):
return [node_info.uuid for node_info
in self._all_node_data.values() if node_info.is_master_candidate]
def GetAllPurePotentialMasterCandidates(self):
"""Get the potential master candidates which are not master candidates.
@rtype: list of tuples (string, C{_NodeInfo})
@returns: list of tuples of node name and node information of nodes
which are potential master candidates but not master
candidates
"""
return [(name, node_info) for name, node_info
in self._all_node_data.items()
if node_info.is_potential_master_candidate and
not node_info.is_master_candidate]
def GetAllMasterCandidates(self):
"""Get all master candidate nodes.
@rtype: list of tuples (string, C{_NodeInfo})
@returns: list of tuples of node name and node information of master
candidate nodes.
"""
return [(name, node_info) for name, node_info
in self._all_node_data.items() if node_info.is_master_candidate]
def GetAllNormalNodes(self):
"""Get all normal nodes.
Normal nodes are nodes that are neither master, master candidate nor
potential master candidate.
@rtype: list of tuples (string, C{_NodeInfo})
@returns: list of tuples of node name and node information of normal
nodes
"""
return [(name, node_info) for name, node_info
in self._all_node_data.items() if not node_info.is_master_candidate
and not node_info.is_potential_master_candidate]
def GetAllNodesDiverse(self):
"""This returns all nodes in a diverse order.
This will return all nodes, but makes sure that they are ordered so that
    the list will contain, in a round-robin fashion, a master candidate,
a potential master candidate, a normal node, then again a master
candidate, etc.
@rtype: list of tuples (string, C{_NodeInfo})
@returns: list of tuples of node name and node information
"""
master_candidates = self.GetAllMasterCandidates()
potential_master_candidates = self.GetAllPurePotentialMasterCandidates()
normal_nodes = self.GetAllNormalNodes()
mixed_list = []
i = 0
assert (len(self._all_node_data) == len(master_candidates)
+ len(potential_master_candidates) + len(normal_nodes))
while len(mixed_list) < len(self._all_node_data):
if i % 3 == 0:
if master_candidates:
mixed_list.append(master_candidates[0])
master_candidates = master_candidates[1:]
elif i % 3 == 1:
if potential_master_candidates:
mixed_list.append(potential_master_candidates[0])
potential_master_candidates = potential_master_candidates[1:]
else: # i % 3 == 2
if normal_nodes:
mixed_list.append(normal_nodes[0])
normal_nodes = normal_nodes[1:]
i += 1
return mixed_list
def GetPublicKeysOfNode(self, node):
"""Returns the public keys that are stored on the given node.
@rtype: dict of str to list of str
    @returns: a mapping of node UUIDs to lists of public keys
"""
return self._public_keys[node]
def GetAuthorizedKeysOfNode(self, node):
"""Returns the authorized keys of the given node.
@type node: string
@param node: name of the node
@rtype: list of str
@returns: a list of authorized keys that are stored on that node
"""
return self._authorized_keys[node]
def GetKeyOfNode(self, node):
"""Returns the SSH key of the given node.
@type node: string
@param node: name of the node
@rtype: string
@returns: the SSH key of the node
"""
return self._all_node_data[node].key
def SetOrAddNode(self, name, uuid, key, pot_mc, mc, master):
"""Adds a new node to the state of the file manager.
This is necessary when testing to add new nodes to the cluster. Otherwise
this new node's state would not be evaluated properly with the assertion
functions.
@type name: string
@param name: name of the new node
@type uuid: string
@param uuid: UUID of the new node
@type key: string
@param key: SSH key of the new node
@type pot_mc: boolean
@param pot_mc: whether the new node is a potential master candidate
@type mc: boolean
@param mc: whether the new node is a master candidate
@type master: boolean
@param master: whether the new node is the master
"""
self._all_node_data[name] = self._NodeInfo(uuid, key, pot_mc, mc, master)
if name not in self._authorized_keys:
self._authorized_keys[name] = set()
if mc:
self._authorized_keys[name].add(key)
if name not in self._public_keys:
self._public_keys[name] = {}
self._AssertTypePublicKeys()
self._AssertTypeAuthorizedKeys()
def NodeHasPublicKey(self, file_node_name, key_node_uuid, key):
"""Checks whether a node has another node's public key.
@type file_node_name: string
@param file_node_name: name of the node whose public key file is inspected
@type key_node_uuid: string
@param key_node_uuid: UUID of the node whose key is checked for
@rtype: boolean
    @return: True if the key_node's UUID is found with the matching key 'key'
"""
for (node_uuid, pub_keys) in self._public_keys[file_node_name].items():
if key in pub_keys and key_node_uuid == node_uuid:
return True
return False
def NodeHasAuthorizedKey(self, file_node_name, key):
"""Checks whether a node has a particular key in its authorized_keys file.
@type file_node_name: string
@param file_node_name: name of the node whose authorized_key file is
inspected
@type key: string
@param key: key which is expected to be found in the node's authorized_key
file
@rtype: boolean
@return: True if the key is found in the node's authorized_key file
"""
return key in self._authorized_keys[file_node_name]
def AssertNodeSetOnlyHasAuthorizedKey(self, node_set, query_node_key):
"""Check if nodes in the given set only have a particular authorized key.
@type node_set: list of strings
@param node_set: list of nodes who are supposed to have the key
@type query_node_key: string
@param query_node_key: key which is looked for
"""
assert isinstance(node_set, list)
for node_name in self._all_node_data.keys():
if node_name in node_set:
if not self.NodeHasAuthorizedKey(node_name, query_node_key):
raise Exception("Node '%s' does not have authorized key '%s'."
% (node_name, query_node_key))
else:
if self.NodeHasAuthorizedKey(node_name, query_node_key):
raise Exception("Node '%s' has authorized key '%s' although it"
" should not." % (node_name, query_node_key))
def AssertAllNodesHaveAuthorizedKey(self, key):
"""Check if all nodes have a particular key in their auth. keys file.
@type key: string
    @param key: key expected to be present in all nodes' authorized_keys files
@raise Exception: if a node does not have the authorized key.
"""
self.AssertNodeSetOnlyHasAuthorizedKey(self._all_node_data.keys(), key)
def AssertNoNodeHasAuthorizedKey(self, key):
"""Check if none of the nodes has a particular key in their auth. keys file.
@type key: string
    @param key: key expected to be absent from all nodes' authorized_keys files
@raise Exception: if a node *does* have the authorized key.
"""
self.AssertNodeSetOnlyHasAuthorizedKey([], key)
def AssertNodeSetOnlyHasPublicKey(self, node_set, query_node_uuid,
query_node_key):
"""Check if nodes in the given set only have a particular public key.
@type node_set: list of strings
@param node_set: list of nodes who are supposed to have the key
@type query_node_uuid: string
@param query_node_uuid: uuid of the node whose key is looked for
@type query_node_key: string
@param query_node_key: key which is looked for
"""
for node_name in self._all_node_data.keys():
if node_name in node_set:
if not self.NodeHasPublicKey(node_name, query_node_uuid,
query_node_key):
raise Exception("Node '%s' does not have public key '%s' of node"
" '%s'." % (node_name, query_node_key,
query_node_uuid))
else:
if self.NodeHasPublicKey(node_name, query_node_uuid, query_node_key):
raise Exception("Node '%s' has public key '%s' of node"
" '%s' although it should not."
% (node_name, query_node_key, query_node_uuid))
def AssertNoNodeHasPublicKey(self, uuid, key):
"""Check if none of the nodes have the given public key in their file.
@type uuid: string
@param uuid: UUID of the node whose key is looked for
@raise Exception: if a node *does* have the public key.
"""
self.AssertNodeSetOnlyHasPublicKey([], uuid, key)
def AssertPotentialMasterCandidatesOnlyHavePublicKey(self, query_node_name):
"""Checks if the node's key is on all potential master candidates only.
This ensures that the node's key is in all public key files of all
    potential master candidates, and it also checks that the key is
    *not* in any other node's key file.
@param query_node_name: name of the node whose key is expected to be
in the public key file of all potential master
candidates
@type query_node_name: string
@raise Exception: when a potential master candidate does not have
the public key or a normal node *does* have a public key.
"""
query_node_uuid, query_node_key, _, _, _ = \
self._all_node_data[query_node_name]
potential_master_candidates = self.GetAllPotentialMasterCandidateNodeNames()
self.AssertNodeSetOnlyHasPublicKey(
potential_master_candidates, query_node_uuid, query_node_key)
def _AssertTypePublicKeys(self):
"""Asserts that the public key dictionary has the right types.
This is helpful as an invariant that shall not be violated during the
tests due to type errors.
"""
assert isinstance(self._public_keys, dict)
for node_file, pub_keys in self._public_keys.items():
assert isinstance(node_file, str)
assert isinstance(pub_keys, dict)
for node_key, keys in pub_keys.items():
assert isinstance(node_key, str)
assert isinstance(keys, list)
for key in keys:
assert isinstance(key, str)
def _AssertTypeAuthorizedKeys(self):
"""Asserts that the authorized keys dictionary has the right types.
This is useful to check as an invariant that is not supposed to be violated
during the tests.
"""
assert isinstance(self._authorized_keys, dict)
for node_file, auth_keys in self._authorized_keys.items():
assert isinstance(node_file, str)
assert isinstance(auth_keys, set)
for key in auth_keys:
assert isinstance(key, str)
# Disabling a pylint warning about unused parameters. Those need
# to be here to properly mock the real methods.
# pylint: disable=W0613
def RunCommand(self, cluster_name, node, base_cmd, port, data,
debug=False, verbose=False, use_cluster_key=False,
ask_key=False, strict_host_check=False,
ensure_version=False):
"""This emulates ssh.RunSshCmdWithStdin calling ssh_update.
While in real SSH operations, ssh.RunSshCmdWithStdin is called
with the command ssh_update to manipulate a remote node's SSH
key files (authorized_keys and ganeti_pub_key) file, this method
emulates the operation by manipulating only its internal dictionaries
    of SSH keys. No actual key files of any node are touched.
"""
if node in self._max_retries:
if node not in self._retries:
self._retries[node] = 0
self._retries[node] += 1
if self._retries[node] < self._max_retries[node]:
raise errors.OpExecError("(Fake) SSH connection to node '%s' failed."
% node)
assert base_cmd == pathutils.SSH_UPDATE
if constants.SSHS_SSH_AUTHORIZED_KEYS in data:
instructions_auth = data[constants.SSHS_SSH_AUTHORIZED_KEYS]
self._HandleAuthorizedKeys(instructions_auth, node)
if constants.SSHS_SSH_PUBLIC_KEYS in data:
instructions_pub = data[constants.SSHS_SSH_PUBLIC_KEYS]
self._HandlePublicKeys(instructions_pub, node)
if constants.SSHS_GENERATE in data:
instructions_generate = data[constants.SSHS_GENERATE]
self._GenerateNewKey(instructions_generate, node)
# pylint: enable=W0613
def _GenerateNewKey(self, instructions_generate, node):
"""Generates a new key for the given node.
Note that this is a very rudimentary generation of a new key. The key is
always generated with the same pattern, starting with 'new_key'. That
means if you run it twice, it will actually produce the same key. However,
for what we want to test, this is sufficient.
    The 'suffix' instruction is also ignored and the key is directly overridden.
This works so far, but simplifies the tests a bit. It might be extended
in case it becomes necessary.
@type instructions_generate: tuple of (string, integer, string)
@param instructions_generate: an instructions tuple for generating a new
SSH key. This has to comply to the C{_DATA_CHECK} description in
C{ssh_update.py}.
@type node: string
@param node: name of node
"""
(key_type, key_bits, suffix) = instructions_generate
assert key_type in constants.SSHK_ALL
assert key_bits > 0
assert isinstance(suffix, str)
new_key = "new_key_%s" % node
old_node_data = self._all_node_data[node]
new_node_data = self._NodeInfo(
uuid=old_node_data.uuid,
key=new_key,
is_potential_master_candidate=old_node_data
.is_potential_master_candidate,
is_master_candidate=old_node_data.is_master_candidate,
is_master=old_node_data.is_master)
self._all_node_data[node] = new_node_data
def _EnsureAuthKeyFile(self, file_node_name):
if file_node_name not in self._authorized_keys:
self._authorized_keys[file_node_name] = set()
self._AssertTypePublicKeys()
self._AssertTypeAuthorizedKeys()
def _AddAuthorizedKeys(self, file_node_name, ssh_keys):
"""Mocks adding the given keys to the authorized_keys file."""
assert isinstance(ssh_keys, list)
self._EnsureAuthKeyFile(file_node_name)
for key in ssh_keys:
self._authorized_keys[file_node_name].add(key)
self._AssertTypePublicKeys()
self._AssertTypeAuthorizedKeys()
def _RemoveAuthorizedKeys(self, file_node_name, keys):
"""Mocks removing the keys from authorized_keys on the given node.
@param keys: list of ssh keys
@type keys: list of strings
"""
self._EnsureAuthKeyFile(file_node_name)
self._authorized_keys[file_node_name] = \
set([k for k in self._authorized_keys[file_node_name] if k not in keys])
self._AssertTypeAuthorizedKeys()
def _HandleAuthorizedKeys(self, instructions, node):
(action, authorized_keys) = instructions
ssh_key_sets = authorized_keys.values()
if action == constants.SSHS_ADD:
for ssh_keys in ssh_key_sets:
self._AddAuthorizedKeys(node, ssh_keys)
elif action == constants.SSHS_REMOVE:
for ssh_keys in ssh_key_sets:
self._RemoveAuthorizedKeys(node, ssh_keys)
else:
raise Exception("Unsupported action: %s" % action)
self._AssertTypeAuthorizedKeys()
def _EnsurePublicKeyFile(self, file_node_name):
if file_node_name not in self._public_keys:
self._public_keys[file_node_name] = {}
self._AssertTypePublicKeys()
def _ClearPublicKeys(self, file_node_name):
self._public_keys[file_node_name] = {}
self._AssertTypePublicKeys()
def _OverridePublicKeys(self, ssh_keys, file_node_name):
assert isinstance(ssh_keys, dict)
self._ClearPublicKeys(file_node_name)
for key_node_uuid, node_keys in ssh_keys.items():
assert isinstance(node_keys, list)
if key_node_uuid in self._public_keys[file_node_name]:
raise Exception("Duplicate node in ssh_update data.")
self._public_keys[file_node_name][key_node_uuid] = node_keys
self._AssertTypePublicKeys()
def _ReplaceOrAddPublicKeys(self, public_keys, file_node_name):
assert isinstance(public_keys, dict)
self._EnsurePublicKeyFile(file_node_name)
for key_node_uuid, keys in public_keys.items():
assert isinstance(keys, list)
self._public_keys[file_node_name][key_node_uuid] = keys
self._AssertTypePublicKeys()
def _RemovePublicKeys(self, public_keys, file_node_name):
assert isinstance(public_keys, dict)
self._EnsurePublicKeyFile(file_node_name)
for key_node_uuid, _ in public_keys.items():
if key_node_uuid in self._public_keys[file_node_name]:
self._public_keys[file_node_name][key_node_uuid] = []
self._AssertTypePublicKeys()
def _HandlePublicKeys(self, instructions, node):
(action, public_keys) = instructions
if action == constants.SSHS_OVERRIDE:
self._OverridePublicKeys(public_keys, node)
elif action == constants.SSHS_ADD:
self._ReplaceOrAddPublicKeys(public_keys, node)
elif action == constants.SSHS_REPLACE_OR_ADD:
self._ReplaceOrAddPublicKeys(public_keys, node)
elif action == constants.SSHS_REMOVE:
self._RemovePublicKeys(public_keys, node)
elif action == constants.SSHS_CLEAR:
self._ClearPublicKeys(node)
else:
raise Exception("Unsupported action: %s." % action)
self._AssertTypePublicKeys()
# pylint: disable=W0613
def AddAuthorizedKeys(self, file_obj, keys):
"""Emulates ssh.AddAuthorizedKeys on the master node.
    Instead of actually manipulating the authorized_keys file, this method
keeps the state of the file in a dictionary in memory.
@see: C{ssh.AddAuthorizedKeys}
"""
assert isinstance(keys, list)
assert self._master_node_name
self._AddAuthorizedKeys(self._master_node_name, keys)
self._AssertTypeAuthorizedKeys()
def RemoveAuthorizedKeys(self, file_name, keys):
"""Emulates ssh.RemoveAuthorizeKeys on the master node.
    Instead of actually manipulating the authorized_keys file, this method
keeps the state of the file in a dictionary in memory.
@see: C{ssh.RemoveAuthorizedKeys}
"""
assert isinstance(keys, list)
assert self._master_node_name
self._RemoveAuthorizedKeys(self._master_node_name, keys)
self._AssertTypeAuthorizedKeys()
def AddPublicKey(self, new_uuid, new_key, **kwargs):
"""Emulates ssh.AddPublicKey on the master node.
    Instead of actually manipulating the public key file, this method
keeps the state of the file in a dictionary in memory.
@see: C{ssh.AddPublicKey}
"""
assert self._master_node_name
assert isinstance(new_key, str)
key_dict = {new_uuid: [new_key]}
self._ReplaceOrAddPublicKeys(key_dict, self._master_node_name)
self._AssertTypePublicKeys()
def RemovePublicKey(self, target_uuid, **kwargs):
"""Emulates ssh.RemovePublicKey on the master node.
    Instead of actually manipulating the public key file, this method
keeps the state of the file in a dictionary in memory.
    @see: C{ssh.RemovePublicKey}
"""
assert self._master_node_name
key_dict = {target_uuid: []}
self._RemovePublicKeys(key_dict, self._master_node_name)
self._AssertTypePublicKeys()
def QueryPubKeyFile(self, target_uuids, **kwargs):
"""Emulates ssh.QueryPubKeyFile on the master node.
    Instead of actually manipulating the public key file, this method
keeps the state of the file in a dictionary in memory.
@see: C{ssh.QueryPubKey}
"""
assert self._master_node_name
all_keys = target_uuids is None
if all_keys:
return self._public_keys[self._master_node_name]
if isinstance(target_uuids, str):
target_uuids = [target_uuids]
result_dict = {}
for key_node_uuid, keys in \
self._public_keys[self._master_node_name].items():
if key_node_uuid in target_uuids:
result_dict[key_node_uuid] = keys
self._AssertTypePublicKeys()
return result_dict
def ReplaceNameByUuid(self, node_uuid, node_name, **kwargs):
"""Emulates ssh.ReplaceNameByUuid on the master node.
    Instead of actually manipulating the public key file, this method
keeps the state of the file in a dictionary in memory.
@see: C{ssh.ReplacenameByUuid}
"""
assert isinstance(node_uuid, str)
assert isinstance(node_name, str)
assert self._master_node_name
if node_name in self._public_keys[self._master_node_name]:
self._public_keys[self._master_node_name][node_uuid] = \
self._public_keys[self._master_node_name][node_name][:]
del self._public_keys[self._master_node_name][node_name]
self._AssertTypePublicKeys()
# pylint: enable=W0613
| bsd-2-clause | 3,561,939,545,193,350,700 | 36.55308 | 80 | 0.679789 | false | 3.862111 | false | false | false |
USGSDenverPychron/pychron | pychron/experiment/image_browser.py | 1 | 9889 | # ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from chaco.api import ArrayPlotData, Plot, HPlotContainer
from chaco.tools.api import ZoomTool, PanTool
from chaco.tools.image_inspector_tool import ImageInspectorOverlay, \
ImageInspectorTool
from enable.component import Component
from enable.component_editor import ComponentEditor
from traits.api import HasTraits, Instance, List, Str, Bool, on_trait_change, String, \
Button, Dict, Any
from traitsui.api import View, Item, ListStrEditor, HGroup, VGroup, \
spring, VSplit, Group
# ============= standard library imports ========================
import Image
from numpy import array
import os
import httplib
from cStringIO import StringIO
# ============= local library imports ==========================
from pychron.core.ui.custom_label_editor import CustomLabel
from pychron.database.isotope_database_manager import IsotopeDatabaseManager
from pychron.paths import paths
PORT = 8083
# TEST_IMAGE = Image.open(open('/Users/ross/Sandbox/snapshot001.jpg'))
# TEST_IMAGE = ImageData.fromfile('/Users/ross/Sandbox/foo.png')
class ImageContainer(HasTraits):
container = Instance(HPlotContainer, ())
name = String
def traits_view(self):
v = View(VGroup(
HGroup(spring, CustomLabel('name', color='maroon', size=16,
height=-25,
width=100,
), spring),
Item('container', show_label=False, editor=ComponentEditor()),
))
return v
class ImageSpec(HasTraits):
name = Str
note = Str
def traits_view(self):
v = View(VGroup(Item('name'),
Group(
Item('note', style='custom', show_label=False),
show_border=True,
label='Note'
)
)
)
return v
class ImageEditor(HasTraits):
names = List
selected = Str
save_db = Button('Save to DB')
image_spec = Instance(ImageSpec)
image_specs = Dict
db = Any
# ===============================================================================
# handlers
# ===============================================================================
def _selected_changed(self):
if self.selected in self.image_specs:
spec = self.image_specs[self.selected]
else:
spec = ImageSpec(name=self.selected)
self.image_specs[self.selected] = spec
self.image_spec = spec
def _save_db_fired(self):
db = self.db
print db
def traits_view(self):
v = View(
VSplit(
Item('names', show_label=False,
editor=ListStrEditor(editable=False,
selected='selected',
operations=[]
),
height=0.6
),
Item('image_spec', show_label=False, style='custom',
height=0.4
)
),
Item('save_db', show_label=False)
)
return v
class ImageBrowser(IsotopeDatabaseManager):
# db = Instance(IsotopeAdapter)
image_container = Instance(ImageContainer, ())
image_editor = Instance(ImageEditor)
plot = Instance(Component)
# names = List
# selected = Str
use_cache = Bool(True)
cache_dir = paths.image_cache_dir
_conn = None
def _image_editor_default(self):
im = ImageEditor(db=self.db)
return im
def _is_cached(self, p):
p = os.path.join(self.cache_dir, p)
return os.path.isfile(p)
def load_from_remote_source(self, name):
if self._is_cached(name):
data = self._get_cached(name)
else:
data = self._get_remote_file(name)
self._load_image_data(data)
def load_remote_directory(self, name):
self.info('retrieve contents of remote directory {}'.format(name))
resp = self._get(name)
if resp:
htxt = resp.read()
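            # Assumed index format (sketch): one '<li><a href=...>name</a>' entry per
            # line; the parsing below keeps the text between the first '>' and '<'.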
for li in htxt.split('\n'):
if li.startswith('<li>'):
args = li[4:].split('>')
name, _tail = args[1].split('<')
self.image_editor.names.append(name)
return True
def _connection_factory(self, reset=False):
if reset or self._conn is None:
host, port = 'localhost', 8081
url = '{}:{}'.format(host, port)
conn = httplib.HTTPConnection(url)
else:
conn = self._conn
self._conn = conn
return conn
    def _get(self, name):
        conn = self._connection_factory()
        conn.request('GET', '/{}'.format(name))
        return conn.getresponse()
    def _get_remote_file(self, name):
        self.info('retrieve {} from remote directory'.format(name))
        resp = self._get(name)
        buf = StringIO()
        buf.write(resp.read())
        buf.seek(0)
        im = Image.open(buf)
        im = im.convert('RGB')
        if self.use_cache:
            buf.seek(0)
            if os.path.isdir(self.cache_dir):
                with open(os.path.join(self.cache_dir, name), 'w') as fp:
                    fp.write(buf.read())
            else:
                self.info('cache directory does not exist. {}'.format(self.cache_dir))
        buf.close()
        return array(im)
def _get_cached(self, name):
self.info('retrieve {} from cache directory'.format(name))
p = os.path.join(self.cache_dir, name)
with open(p, 'r') as rfile:
im = Image.open(rfile)
im = im.convert('RGB')
return array(im)
def _load_image_data(self, data):
cont = HPlotContainer()
pd = ArrayPlotData()
plot = Plot(data=pd, padding=[30, 5, 5, 30], default_origin='top left')
pd.set_data('img', data)
img_plot = plot.img_plot('img',
)[0]
self._add_inspector(img_plot)
self._add_tools(img_plot)
cont.add(plot)
cont.request_redraw()
self.image_container.container = cont
def _add_inspector(self, img_plot):
imgtool = ImageInspectorTool(img_plot)
img_plot.tools.append(imgtool)
overlay = ImageInspectorOverlay(component=img_plot, image_inspector=imgtool,
bgcolor="white", border_visible=True)
img_plot.overlays.append(overlay)
#
def _add_tools(self, img_plot):
zoom = ZoomTool(component=img_plot, tool_mode="box", always_on=False)
pan = PanTool(component=img_plot, restrict_to_data=True)
img_plot.tools.append(pan)
img_plot.overlays.append(zoom)
# ===============================================================================
# handlers
# ===============================================================================
@on_trait_change('image_editor:selected')
def _selected_changed(self):
sel = self.image_editor.selected
if sel:
self.load_from_remote_source(sel)
self.image_container.name = sel
def traits_view(self):
v = View(
HGroup(
Item('image_editor', show_label=False, style='custom',
width=0.3
),
# Item('names', show_label=False, editor=ListStrEditor(editable=False,
# selected='selected',
# operations=[]
# ),
# width=0.3,
# ),
Item('image_container', style='custom',
width=0.7,
show_label=False)
),
# Item('container', show_label=False,
# width=0.7,
# editor=ComponentEditor())),
resizable=True,
height=800,
width=900
)
return v
if __name__ == '__main__':
from pychron.core.helpers.logger_setup import logging_setup
logging_setup('image_viewer')
im = ImageBrowser(cache_dir='/Users/ross/Sandbox/cache')
im.load_remote_directory('')
# im.load_from_remote_source('raster2.png')
# im.load_remote_directory()
# im.names = 'snapshot001.jpg,snapshot002.jpg,snapshot003.jpg,snapshot004.jpg'.split(',')
# im.load_from_remote_source('foo')
# im.load_image_from_file('/Users/ross/Sandbox/diodefailsnapshot.jpg')
im.configure_traits()
# ============= EOF =============================================
| apache-2.0 | -8,116,230,646,335,603,000 | 34.067376 | 94 | 0.494388 | false | 4.303307 | false | false | false |
bioinformatics-IBCH/logloss-beraf | logloss_beraf/model_ops/trainer.py | 1 | 12714 | # coding=utf-8
import copy
import logging
import os
# https://github.com/matplotlib/matplotlib/issues/3466/#issuecomment-195899517
import itertools
import matplotlib
matplotlib.use('agg')
import numpy as np
import pandas
from sklearn import (
preprocessing,
model_selection,
)
from sklearn.cross_validation import (
LeaveOneOut,
StratifiedKFold,
)
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import RandomizedLogisticRegression
import cPickle as pickle
from utils.constants import (
PREFILTER_PCA_PLOT_NAME,
POSTFILTER_PCA_PLOT_NAME,
FEATURE_IMPORTANCE_PLOT_NAME,
FEATURE_COLUMN,
FEATURE_IMPORTANCE_COLUMN,
TRAINED_MODEL_NAME,
)
from visualization.plotting import plot_pca_by_annotation
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
from settings import logger
class LLBModelTrainer(object):
"""
Class implementing main steps of the algorithm:
1. Initial regions filtering with a user-specified delta beta-values threshold
2. Applying randomized logistic regression in order to additionally pre-filter input regions
3. Extracting highly correlated sites
    4. Reconstructing the logloss function over an interval bounded by the user-specified limit on the number of sites
5. Detecting optimal panel of regions and training final model
Also does some visualizations
"""
def __init__(self, threads=0, max_num_of_features=20,
cv_method="SKFold", class_weights="balanced", final_clf_estimators_num=3000,
intermediate_clf_estimators_num=1000, logloss_estimates=50, min_beta_threshold=0.2,
rr_iterations=5000, correlation_threshold=0.85, output_folder=None):
"""
:param threads:
:type threads: int
:param max_num_of_features: maximum number of features a model can contain
:type max_num_of_features: int
:param cv_method: Supported cross-validation methods: "LOO", "SKFold"
:type cv_method: str
:param class_weights: Class balancing strategy
:type class_weights: dict, str
:param final_clf_estimators_num: number of estimators used in a final classifier
:type final_clf_estimators_num: int
:param intermediate_clf_estimators_num: number of estimators used in intermediate classifiers
:type intermediate_clf_estimators_num: int
:param logloss_estimates: Number of LogLoss estimates on number of sites limited interval
:type logloss_estimates: int
:param min_beta_threshold: Minimum beta-values difference threshold
:type min_beta_threshold: float
:param rr_iterations: Number of randomized regression iterations
"""
self.threads = threads
self.max_num_of_features = max_num_of_features
self.min_beta_threshold = min_beta_threshold
# train process configuration
self.cv_method = cv_method
self.class_weights = class_weights
self.final_clf_estimators_num = final_clf_estimators_num
self.intermediate_clf_estimators_num = intermediate_clf_estimators_num
self.rr_iterations = rr_iterations
self.logloss_estimates = logloss_estimates
# common
self.correlation_threshold = correlation_threshold
self.output_folder = output_folder if output_folder is not None else "results"
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
def _run_randomized_regression(self, feature_df, annotation, clinical_column, sample_fraction=0.7):
annotation = copy.deepcopy(annotation)
# Encode labels of the classes
le = preprocessing.LabelEncoder()
annotation[clinical_column] = le.fit_transform(annotation[clinical_column])
clf = RandomizedLogisticRegression(
n_resampling=self.rr_iterations,
sample_fraction=sample_fraction,
n_jobs=1,
verbose=1,
).fit(feature_df, annotation[clinical_column])
selected_features = feature_df.T[clf.scores_ != 0].index
logger.info("Number of selected features: %d", len(selected_features))
return selected_features, clf
def _train_clf(self, X, y, n_estimators=10):
clf = RandomForestClassifier(n_estimators, n_jobs=self.threads, class_weight=self.class_weights)
scores = scores_accuracy = np.array([0])
cv_algo = None
if self.cv_method is not None:
if self.cv_method == "LOO":
cv_algo = LeaveOneOut(len(y))
elif self.cv_method == "SKFold":
cv_algo = StratifiedKFold(y)
logger.info("Running cross-validation...")
scores = model_selection.cross_val_score(
clf,
X,
y,
cv=cv_algo,
scoring='neg_log_loss',
n_jobs=self.threads,
verbose=1,
)
clf.fit(X, y)
return clf, scores.mean(), scores.std()
def _describe_and_filter_regions(self, basic_region_df, annotation, clinical_column, sample_name_column):
logger.info("Initial number of regions: {0}".format(basic_region_df.shape))
# Initial filtering based on min_beta_threshold
class_combinations = itertools.combinations(annotation[clinical_column].unique(), 2)
for combination in class_combinations:
first_class_samples = annotation[annotation[clinical_column] == combination[0]][sample_name_column]
second_class_samples = annotation[annotation[clinical_column] == combination[1]][sample_name_column]
mean_difference = (basic_region_df.loc[first_class_samples].mean()
- basic_region_df.loc[second_class_samples].mean())
basic_region_df = basic_region_df[mean_difference[abs(mean_difference) > self.min_beta_threshold].index.tolist()]
basic_region_df = basic_region_df.dropna(how="any", axis=1)
logger.info("Number of features after initial filtration: {0}".format(basic_region_df.shape))
plot_pca_by_annotation(
basic_region_df,
annotation,
clinical_column,
sample_name_column,
outfile=os.path.join(self.output_folder, PREFILTER_PCA_PLOT_NAME),
)
logger.info("Starting feature selection with RLR...")
selected_features, model = self._run_randomized_regression(
basic_region_df,
annotation,
clinical_column,
)
plot_pca_by_annotation(
basic_region_df[selected_features],
annotation,
clinical_column,
sample_name_column,
outfile=os.path.join(self.output_folder, POSTFILTER_PCA_PLOT_NAME),
)
return selected_features, model
def plot_fi_distribution(self, feature_importances):
ax = feature_importances[FEATURE_IMPORTANCE_COLUMN].hist()
ax.set_xlabel("Feature Importance")
ax.set_ylabel("Number of features")
fig = ax.get_figure()
fig.savefig(os.path.join(self.output_folder, FEATURE_IMPORTANCE_PLOT_NAME))
def _apply_feature_imp_thresh(self, features, feature_imp, thresh):
return [
feature[0] for feature in
zip(features.values, feature_imp)
if feature[1] > thresh
]
def get_threshold(self, logloss_df):
# Standard error
ll_se = logloss_df["mean"].std() / np.sqrt(len(logloss_df["mean"]))
# Restricting search to desired number of features.
logloss_df = logloss_df[logloss_df["len"] <= int(self.max_num_of_features)]
ll_max = logloss_df[logloss_df["mean"] == logloss_df["mean"].max()].iloc[0]
ll_interval = logloss_df[logloss_df["mean"] > (ll_max["mean"] - 0.5 * ll_se)]
res = ll_interval[ll_interval["len"] == ll_interval["len"].min()].iloc[0]
return res
def train(self, train_regions, anndf, sample_class_column, sample_name_column):
"""
Main functionality
:param train_regions: input dataframe with all regions methylation
:type train_regions: pandas.DataFrame
:param anndf: annotation dataframe, containing at least sample name and sample class
:type anndf: pandas.DataFrame
:param sample_class_column: name of the sample class column
:type sample_class_column: str
:param sample_name_column: name of the sample name column
:type sample_name_column: str
:return:
"""
# train_regions = train_regions.T
# First sort both train_regions and annotation according to sample names
train_regions = train_regions.sort_index(ascending=True)
# Ensure annotation contains only samples from the train_regions
anndf = anndf[anndf[sample_name_column].isin(train_regions.index.tolist())].sort_values(
by=[sample_name_column],
ascending=True
).dropna(subset=[sample_name_column])
train_regions = train_regions.ix[anndf[sample_name_column].tolist()]
assert anndf[sample_name_column].tolist() == train_regions.index.tolist(), \
"Samples in the annotations table are diferrent from those in feature table"
# Prefilter regions
selected_regions, clf = self._describe_and_filter_regions(
train_regions,
anndf,
sample_class_column,
sample_name_column,
)
# Estimate feature importances (FI)
first_clf, mean, std = self._train_clf(
train_regions[selected_regions.values],
anndf[sample_class_column],
n_estimators=self.final_clf_estimators_num,
)
feature_importances = pandas.DataFrame.from_records(
zip(selected_regions.values, first_clf.feature_importances_),
columns=[FEATURE_COLUMN, FEATURE_IMPORTANCE_COLUMN],
)
# Visualizing feature importance distribution
self.plot_fi_distribution(feature_importances)
# Extracting correlated site
feature_importances = feature_importances[
abs(feature_importances[FEATURE_IMPORTANCE_COLUMN]) > 0
]
corr_matrix = train_regions[feature_importances[FEATURE_COLUMN]].corr().applymap(
lambda x: 1 if abs(x) >= self.correlation_threshold else 0
)
logloss_df_cols = ["thresh", "mean", "std", "len"]
logloss_di = pandas.DataFrame(columns=logloss_df_cols)
for thresh in np.arange(
feature_importances[FEATURE_IMPORTANCE_COLUMN].quantile(0.99),
feature_importances[FEATURE_IMPORTANCE_COLUMN].max(),
(
feature_importances[FEATURE_IMPORTANCE_COLUMN].max() -
feature_importances[FEATURE_IMPORTANCE_COLUMN].min()
) / self.logloss_estimates
):
selected_features = self._apply_feature_imp_thresh(selected_regions, first_clf.feature_importances_, thresh)
if len(selected_features) < 2:
continue
logger.info(
"Estimating %d features on feature importance threshold %f",
len(selected_features),
thresh
)
clf, mean, std = self._train_clf(
train_regions[selected_features],
anndf[sample_class_column],
n_estimators=self.intermediate_clf_estimators_num,
)
logloss_di = logloss_di.append(
pandas.Series([thresh, mean, std, len(selected_features)], index=logloss_df_cols),
ignore_index=True,
)
logger.info("LogLoss mean=%f, std=%f on threshold %f", mean, std, thresh)
logger.info("Detecting optimal feature subset...")
thresh = self.get_threshold(logloss_di)
logger.info("Selected threshold")
logger.info(thresh)
selected_features = self._apply_feature_imp_thresh(
selected_regions,
first_clf.feature_importances_,
thresh["thresh"],
)
logger.info("Trainig final model...")
clf, mean, std = self._train_clf(
train_regions[selected_features],
anndf[sample_class_column],
n_estimators=self.final_clf_estimators_num,
)
logger.info("Selected features: {0}".format(selected_features))
pickle.dump((clf, selected_features), open(os.path.join(self.output_folder, TRAINED_MODEL_NAME), 'w'))
return selected_features, clf, mean, std
| gpl-3.0 | -4,018,092,607,383,729,000 | 40.279221 | 125 | 0.632059 | false | 4.055502 | false | false | false |
arximboldi/pigeoncide | src/phys/geom.py | 1 | 1351 | #
# Copyright (C) 2009 Juan Pedro Bolivar Puente, Alberto Villegas Erce
#
# This file is part of Pigeoncide.
#
# Pigeoncide is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Pigeoncide is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from pandac.PandaModules import *
from base.util import delayed2, selflast
ray = delayed2 (selflast (OdeRayGeom))
sphere = delayed2 (selflast (OdeSphereGeom))
box = delayed2 (selflast (OdeBoxGeom))
capsule = delayed2 (selflast (OdeCappedCylinderGeom))
@delayed2
def node (model, space):
return OdeTriMeshGeom (space, OdeTriMeshData (model, False))
@delayed2
def mesh (model, space, scale = Vec3 (1, 1, 1)):
cg_model = loader.loadModel (model)
cg_model.setScale (scale)
return OdeTriMeshGeom (space,
OdeTriMeshData (cg_model, False))
| gpl-3.0 | -6,525,842,370,010,480,000 | 34.552632 | 72 | 0.720207 | false | 3.303178 | false | false | false |
BevoLJ/KRPC | Lagadha/Lunar_XFer_Manager.py | 1 | 6215 | import numpy as np
import time
# from numba import jit
from Orbit_Manager import OrbitManager
class LunarXFerManager(OrbitManager):
def __init__(self):
super().__init__()
self.mode = "LEO"
self.earth = self.KSC.bodies['Earth']
self.moon = self.KSC.bodies['Moon'].orbit
# M O O N P R I M A R Y O R B I T A L E L E M E N T S
self.moon_eccentricity = self.conn.add_stream(getattr, self.moon, 'eccentricity')
self.moon_inclination = self.conn.add_stream(getattr, self.moon, 'inclination')
self.moon_LAN = self.conn.add_stream(getattr, self.moon, 'longitude_of_ascending_node')
self.moon_semi_major_axis = self.conn.add_stream(getattr, self.moon, 'semi_major_axis')
self.moon_argument_of_periapsis = self.conn.add_stream(getattr, self.moon, 'argument_of_periapsis')
self.moon_ETA_pe = self.conn.add_stream(getattr, self.moon, 'time_to_periapsis')
# S E C O N D A R Y O R B I T A L E L E M E N T S
self.moon_ETA_ap = self.conn.add_stream(getattr, self.moon, 'time_to_apoapsis')
self.moon_mean_anomaly = self.conn.add_stream(getattr, self.moon, 'mean_anomaly')
self.moon_eccentric_anomaly = self.conn.add_stream(getattr, self.moon, 'eccentric_anomaly')
self.moon_true_anomaly = self.true_anomaly(self.moon_eccentricity(), self.moon_eccentric_anomaly())
self.moon_longitude_of_pe = self.longitude_of_pe(self.moon_LAN(), self.moon_argument_of_periapsis())
self.moon_period = self.conn.add_stream(getattr, self.moon, 'period')
self.moon_radius = self.conn.add_stream(getattr, self.moon, 'radius')
self.moon_mean_anomaly_at_epoch = self.conn.add_stream(getattr, self.moon, 'mean_anomaly_at_epoch')
self.moon_epoch = self.conn.add_stream(getattr, self.moon, 'epoch')
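    # The helpers below estimate the Moon's future mean anomaly, the phase angle
    # between the vessel's and the Moon's lines of periapsis, and from that the
    # time until the trans-lunar injection burn.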
def moon_future_mean(self, _ta):
_m_n = self.mean_motion(self.mu, self.moon_radius())
_m_delta = self.mean_delta_time(_m_n, self.ut(), _ta)
return self.moon_mean_anomaly() + _m_delta
def moon_xfer_angle(self, _ta, _target_LAN, _target_arg_pe):
_fut_moon_mean = self.moon_future_mean(_ta)
_ves_l_pe = self.longitude_of_pe(self.LAN(), self.argument_of_periapsis()) % (2 * np.pi)
_moon_l_pe = self.longitude_of_pe(_target_LAN, _target_arg_pe)
return self.xfer_radians(_fut_moon_mean, _ves_l_pe, _moon_l_pe)
def xfer_ETA(self, _ta, _target_LAN, _target_arg_pe):
ang_v = self.ang_V_circle(self.period())
_xfer_radians = self.moon_xfer_angle(_ta, _target_LAN, _target_arg_pe)
if self.mean_anomaly() < _xfer_radians: _rad_diff = (_xfer_radians - self.mean_anomaly()) % (2 * np.pi)
else: _rad_diff = (_xfer_radians - self.mean_anomaly()) % (2 * np.pi)
return _rad_diff / ang_v
def xfer(self):
time.sleep(8)
self.control.activate_next_stage()
time.sleep(2)
self.mode = "Injection"
print(self.mode)
# noinspection PyAttributeOutsideInit
def flameout(self, _mode):
if self.eng_status(self.get_active_engine(), "Status") == "Flame-Out!":
self.stage()
self.mode = _mode
# noinspection PyAttributeOutsideInit
def named_flameout(self, _eng_name):
for eng in self.engines:
if eng.name == _eng_name:
if self.eng_status_specific(eng) == "Flame-Out!":
return True
else:
return False
def injection_ETA(self):
_eta = self.ut() + self.seconds_finder(6, 12, 0)
return self.xfer_ETA(_eta, self.moon_LAN(), self.moon_argument_of_periapsis())
def xfer_setup(self):
self.control.rcs = True
self.control.sas = True
self.ap.sas_mode = self.KSC.SASMode.prograde
self.ap.reference_frame = self.vessel.orbital_reference_frame
self.control.throttle = 0
time.sleep(3)
def warp_moon(self):
while self.body().name == "Earth":
if self.altitude() < 200000:
print(self.altitude())
self.KSC.rails_warp_factor = 2
elif self.altitude() < 35000000:
self.KSC.rails_warp_factor = 3
elif self.altitude() < 300000000:
self.KSC.rails_warp_factor = 5
elif self.altitude() < 375000000:
self.KSC.rails_warp_factor = 4
time.sleep(.01)
self.KSC.rails_warp_factor = 0
def capture_burn(self):
self.KSC.warp_to(self.ut() + self.ETA_pe() - 90)
self.ap.sas_mode = self.KSC.SASMode.retrograde
time.sleep(40)
self.ullage_rcs()
self.control.throttle = 1
while self.eccentricity() > .2: time.sleep(.1)
def lmo_burn(self):
self.KSC.warp_to(self.ut() + self.ETA_ap() - 35)
self.ap.sas_mode = self.KSC.SASMode.retrograde
time.sleep(25)
self.ullage_rcs()
self.control.throttle = 1
while self.periapsis_altitude() > 125000: time.sleep(.1)
self.control.throttle = 0
self.KSC.warp_to(self.ut() + self.ETA_pe() - 35)
self.ap.sas_mode = self.KSC.SASMode.retrograde
self.control.toggle_action_group(2)
time.sleep(25)
self.ullage_rcs()
self.control.throttle = 1
while self.periapsis_altitude() > 50000:
if self.eccentricity() > .05: time.sleep(.1)
self.control.throttle = 0
def tank_enable(self):
for p in self.parts.all:
if 'Hydrazine' in p.resources.names:
for r in p.resources.with_resource('Hydrazine'):
r.enabled = True
def impact_burn(self):
self.control.throttle = 0
self.control.rcs = False
self.control.sas = False
self.stage()
self.tank_enable()
time.sleep(2)
self.control.rcs = True
self.control.sas = True
time.sleep(3)
self.ap.sas_mode = self.KSC.SASMode.retrograde
time.sleep(3)
self.control.throttle = 1
while self.periapsis_radius() > self.body().equatorial_radius - 20000:
time.sleep(1)
self.control.throttle = 0 | mit | 5,158,009,346,149,246,000 | 40.165563 | 111 | 0.595977 | false | 3.087432 | false | false | false |
michaelsmit/openparliament | parliament/hansards/models.py | 1 | 21145 | #coding: utf-8
import gzip, os, re
from collections import defaultdict
import datetime
from django.db import models
from django.conf import settings
from django.core import urlresolvers
from django.core.files.base import ContentFile
from django.template.defaultfilters import slugify
from django.utils.datastructures import SortedDict
from django.utils.html import strip_tags
from django.utils.safestring import mark_safe
from parliament.core.models import Session, ElectedMember, Politician
from parliament.bills.models import Bill
from parliament.core import parsetools, text_utils
from parliament.core.utils import memoize_property, language_property
from parliament.activity import utils as activity
import logging
logger = logging.getLogger(__name__)
class DebateManager(models.Manager):
def get_query_set(self):
return super(DebateManager, self).get_query_set().filter(document_type=Document.DEBATE)
class EvidenceManager(models.Manager):
def get_query_set(self):
return super(EvidenceManager, self).get_query_set().filter(document_type=Document.EVIDENCE)
class NoStatementManager(models.Manager):
"""Manager restricts to Documents that haven't had statements parsed."""
def get_query_set(self):
return super(NoStatementManager, self).get_query_set()\
.annotate(scount=models.Count('statement'))\
.exclude(scount__gt=0)
def url_from_docid(docid):
return "http://www.parl.gc.ca/HousePublications/Publication.aspx?DocId=%s&Language=%s&Mode=1" % (
docid, settings.LANGUAGE_CODE[0].upper()
) if docid else None
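# e.g. (sketch) url_from_docid(6503398) ->
#   "http://www.parl.gc.ca/HousePublications/Publication.aspx?DocId=6503398&Language=E&Mode=1"
# assuming settings.LANGUAGE_CODE starts with "e"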
class Document(models.Model):
DEBATE = 'D'
EVIDENCE = 'E'
document_type = models.CharField(max_length=1, db_index=True, choices=(
('D', 'Debate'),
('E', 'Committee Evidence'),
))
date = models.DateField(blank=True, null=True)
number = models.CharField(max_length=6, blank=True) # there exist 'numbers' with letters
session = models.ForeignKey(Session)
source_id = models.IntegerField(unique=True, db_index=True)
most_frequent_word = models.CharField(max_length=20, blank=True)
wordcloud = models.ImageField(upload_to='autoimg/wordcloud', blank=True, null=True)
downloaded = models.BooleanField(default=False,
help_text="Has the source data been downloaded?")
skip_parsing = models.BooleanField(default=False,
help_text="Don't try to parse this, presumably because of errors in the source.")
public = models.BooleanField("Display on site?", default=False)
multilingual = models.BooleanField("Content parsed in both languages?", default=False)
objects = models.Manager()
debates = DebateManager()
evidence = EvidenceManager()
without_statements = NoStatementManager()
class Meta:
ordering = ('-date',)
def __unicode__ (self):
if self.document_type == self.DEBATE:
return u"Hansard #%s for %s (#%s/#%s)" % (self.number, self.date, self.id, self.source_id)
else:
return u"%s evidence for %s (#%s/#%s)" % (
self.committeemeeting.committee.short_name, self.date, self.id, self.source_id)
@memoize_property
def get_absolute_url(self):
if self.document_type == self.DEBATE:
return urlresolvers.reverse('debate', kwargs={
'year': self.date.year, 'month': self.date.month, 'day': self.date.day
})
elif self.document_type == self.EVIDENCE:
return self.committeemeeting.get_absolute_url()
def to_api_dict(self, representation):
d = dict(
date=unicode(self.date) if self.date else None,
number=self.number,
most_frequent_word={'en': self.most_frequent_word},
)
if representation == 'detail':
d.update(
source_id=self.source_id,
source_url=self.source_url,
session=self.session_id,
document_type=self.get_document_type_display(),
)
return d
@property
def url(self):
return self.source_url
@property
def source_url(self):
return url_from_docid(self.source_id)
def _topics(self, l):
topics = []
last_topic = ''
for statement in l:
if statement[0] and statement[0] != last_topic:
last_topic = statement[0]
topics.append((statement[0], statement[1]))
return topics
def topics(self):
"""Returns a tuple with (topic, statement slug) for every topic mentioned."""
return self._topics(self.statement_set.all().values_list('h2_' + settings.LANGUAGE_CODE, 'slug'))
def headings(self):
"""Returns a tuple with (heading, statement slug) for every heading mentioned."""
return self._topics(self.statement_set.all().values_list('h1_' + settings.LANGUAGE_CODE, 'slug'))
def topics_with_qp(self):
"""Returns the same as topics(), but with a link to Question Period at the start of the list."""
statements = self.statement_set.all().values_list(
'h2_' + settings.LANGUAGE_CODE, 'slug', 'h1_' + settings.LANGUAGE_CODE)
topics = self._topics(statements)
qp_seq = None
for s in statements:
if s[2] == 'Oral Questions':
qp_seq = s[1]
break
if qp_seq is not None:
topics.insert(0, ('Question Period', qp_seq))
return topics
@memoize_property
def speaker_summary(self):
"""Returns a sorted dictionary (in order of appearance) summarizing the people
speaking in this document.
        Keys are names, suitable for display. Values are dicts with keys:
slug: Slug of first statement by the person
politician: Boolean -- is this an MP?
description: Short title or affiliation
"""
ids_seen = set()
speakers = SortedDict()
for st in self.statement_set.filter(who_hocid__isnull=False).values_list(
'who_' + settings.LANGUAGE_CODE, # 0
'who_context_' + settings.LANGUAGE_CODE, # 1
'slug', # 2
'politician__name', # 3
'who_hocid'): # 4
if st[4] in ids_seen:
continue
ids_seen.add(st[4])
if st[3]:
who = st[3]
else:
who = parsetools.r_parens.sub('', st[0])
who = re.sub('^\s*\S+\s+', '', who).strip() # strip honorific
if who not in speakers:
info = {
'slug': st[2],
'politician': bool(st[3])
}
if st[1]:
info['description'] = st[1]
speakers[who] = info
return speakers
def outside_speaker_summary(self):
"""Same as speaker_summary, but only non-MPs."""
return SortedDict(
[(k, v) for k, v in self.speaker_summary().items() if not v['politician']]
)
def mp_speaker_summary(self):
"""Same as speaker_summary, but only MPs."""
return SortedDict(
[(k, v) for k, v in self.speaker_summary().items() if v['politician']]
)
def save_activity(self):
statements = self.statement_set.filter(procedural=False).select_related('member', 'politician')
politicians = set([s.politician for s in statements if s.politician])
for pol in politicians:
topics = {}
wordcount = 0
for statement in filter(lambda s: s.politician == pol, statements):
wordcount += statement.wordcount
if statement.topic in topics:
# If our statement is longer than the previous statement on this topic,
# use its text for the excerpt.
if len(statement.text_plain()) > len(topics[statement.topic][1]):
topics[statement.topic][1] = statement.text_plain()
topics[statement.topic][2] = statement.get_absolute_url()
else:
topics[statement.topic] = [statement.slug, statement.text_plain(), statement.get_absolute_url()]
for topic in topics:
if self.document_type == Document.DEBATE:
activity.save_activity({
'topic': topic,
'url': topics[topic][2],
'text': topics[topic][1],
}, politician=pol, date=self.date, guid='statement_%s' % topics[topic][2], variety='statement')
elif self.document_type == Document.EVIDENCE:
assert len(topics) == 1
if wordcount < 80:
continue
(seq, text, url) = topics.values()[0]
activity.save_activity({
'meeting': self.committeemeeting,
'committee': self.committeemeeting.committee,
'text': text,
'url': url,
'wordcount': wordcount,
}, politician=pol, date=self.date, guid='cmte_%s' % url, variety='committee')
def serializable(self):
return {
'date': self.date,
'url': self.get_absolute_url(),
'id': self.id,
'original_url': self.url,
'parliament': self.session.parliamentnum,
'session': self.session.sessnum,
'statements': [s.serializable()
for s in self.statement_set.all()
.order_by('sequence')
.select_related('member__politician', 'member__party', 'member__riding')]
}
def get_wordoftheday(self):
if not self.most_frequent_word:
self.most_frequent_word = text_utils.most_frequent_word(self.statement_set.filter(procedural=False))
if self.most_frequent_word:
self.save()
return self.most_frequent_word
def generate_wordcloud(self):
image = text_utils.statements_to_cloud_by_party(self.statement_set.filter(procedural=False))
self.wordcloud.save("%s-%s.png" % (self.source_id, settings.LANGUAGE_CODE), ContentFile(image), save=True)
self.save()
def get_filename(self, language):
assert self.source_id
assert language in ('en', 'fr')
return '%d-%s.xml.gz' % (self.source_id, language)
def get_filepath(self, language):
filename = self.get_filename(language)
if hasattr(settings, 'HANSARD_CACHE_DIR'):
return os.path.join(settings.HANSARD_CACHE_DIR, filename)
else:
return os.path.join(settings.MEDIA_ROOT, 'document_cache', filename)
def _save_file(self, path, content):
out = gzip.open(path, 'wb')
out.write(content)
out.close()
def get_cached_xml(self, language):
if not self.downloaded:
raise Exception("Not yet downloaded")
return gzip.open(self.get_filepath(language), 'rb')
def delete_downloaded(self):
for lang in ('en', 'fr'):
path = self.get_filepath(lang)
if os.path.exists(path):
os.unlink(path)
self.downloaded = False
self.save()
def _fetch_xml(self, language):
import urllib2
return urllib2.urlopen('http://www.parl.gc.ca/HousePublications/Publication.aspx?DocId=%s&Language=%s&Mode=1&xml=true'
% (self.source_id, language[0].upper())).read()
def download(self):
if self.downloaded:
return True
if self.date and self.date.year < 2006:
raise Exception("No XML available before 2006")
langs = ('en', 'fr')
paths = [self.get_filepath(l) for l in langs]
if not all((os.path.exists(p) for p in paths)):
for path, lang in zip(paths, langs):
self._save_file(path, self._fetch_xml(lang))
self.downloaded = True
self.save()
class Statement(models.Model):
document = models.ForeignKey(Document)
time = models.DateTimeField(db_index=True)
source_id = models.CharField(max_length=15, blank=True)
slug = models.SlugField(max_length=100, blank=True)
urlcache = models.CharField(max_length=200, blank=True)
h1_en = models.CharField(max_length=300, blank=True)
h2_en = models.CharField(max_length=300, blank=True)
h3_en = models.CharField(max_length=300, blank=True)
h1_fr = models.CharField(max_length=400, blank=True)
h2_fr = models.CharField(max_length=400, blank=True)
h3_fr = models.CharField(max_length=400, blank=True)
member = models.ForeignKey(ElectedMember, blank=True, null=True)
politician = models.ForeignKey(Politician, blank=True, null=True) # a shortcut -- should == member.politician
who_en = models.CharField(max_length=300, blank=True)
who_fr = models.CharField(max_length=500, blank=True)
who_hocid = models.PositiveIntegerField(blank=True, null=True, db_index=True)
who_context_en = models.CharField(max_length=300, blank=True)
who_context_fr = models.CharField(max_length=500, blank=True)
content_en = models.TextField()
content_fr = models.TextField(blank=True)
sequence = models.IntegerField(db_index=True)
wordcount = models.IntegerField()
procedural = models.BooleanField(default=False, db_index=True)
written_question = models.CharField(max_length=1, blank=True, choices=(
('Q', 'Question'),
('R', 'Response')
))
statement_type = models.CharField(max_length=35, blank=True)
bills = models.ManyToManyField(Bill, blank=True)
mentioned_politicians = models.ManyToManyField(Politician, blank=True, related_name='statements_with_mentions')
class Meta:
ordering = ('sequence',)
unique_together = (
('document', 'slug')
)
h1 = language_property('h1')
h2 = language_property('h2')
h3 = language_property('h3')
who = language_property('who')
who_context = language_property('who_context')
def save(self, *args, **kwargs):
if not self.wordcount:
self.wordcount = parsetools.countWords(self.text_plain())
self.content_en = self.content_en.replace('\n', '').replace('</p>', '</p>\n').strip()
self.content_fr = self.content_fr.replace('\n', '').replace('</p>', '</p>\n').strip()
if ((not self.procedural) and self.wordcount <= 300
and (
(parsetools.r_notamember.search(self.who) and re.search(r'(Speaker|Chair|président)', self.who))
or (not self.who)
or not any(p for p in self.content_en.split('\n') if 'class="procedural"' not in p)
)):
# Some form of routine, procedural statement (e.g. something short by the speaker)
self.procedural = True
if not self.urlcache:
self.generate_url()
super(Statement, self).save(*args, **kwargs)
@property
def date(self):
return datetime.date(self.time.year, self.time.month, self.time.day)
def generate_url(self):
self.urlcache = "%s%s/" % (
self.document.get_absolute_url(),
(self.slug if self.slug else self.sequence))
def get_absolute_url(self):
if not self.urlcache:
self.generate_url()
return self.urlcache
def __unicode__ (self):
return u"%s speaking about %s around %s" % (self.who, self.topic, self.time)
@property
@memoize_property
def content_floor(self):
if not self.content_fr:
return self.content_en
el, fl = self.content_en.split('\n'), self.content_fr.split('\n')
if len(el) != len(fl):
logger.error("Different en/fr paragraphs in %s" % self.get_absolute_url())
return self.content_en
r = []
for e, f in zip(el, fl):
idx = e.find('data-originallang="')
if idx and e[idx+19:idx+21] == 'fr':
r.append(f)
else:
r.append(e)
return u"\n".join(r)
def text_html(self, language=settings.LANGUAGE_CODE):
return mark_safe(getattr(self, 'content_' + language))
def text_plain(self, language=settings.LANGUAGE_CODE):
return (strip_tags(getattr(self, 'content_' + language)
.replace('\n', '')
.replace('<br>', '\n')
.replace('</p>', '\n\n'))
.strip())
# temp compatibility
@property
def heading(self):
return self.h1
@property
def topic(self):
return self.h2
def serializable(self):
v = {
'url': self.get_absolute_url(),
'heading': self.heading,
'topic': self.topic,
'time': self.time,
'attribution': self.who,
'text': self.text_plain()
}
if self.member:
v['politician'] = {
'id': self.member.politician.id,
'member_id': self.member.id,
'name': self.member.politician.name,
'url': self.member.politician.get_absolute_url(),
'party': self.member.party.short_name,
'riding': unicode(self.member.riding),
}
return v
def to_api_dict(self, representation):
d = dict(
time=unicode(self.time) if self.time else None,
attribution={'en': self.who_en, 'fr': self.who_fr},
content={'en': self.content_en, 'fr': self.content_fr},
url=self.get_absolute_url(),
politician_url=self.politician.get_absolute_url() if self.politician else None,
politician_membership_url=urlresolvers.reverse('politician_membership',
kwargs={'member_id': self.member_id}) if self.member_id else None,
procedural=self.procedural,
source_id=self.source_id
)
for h in ('h1', 'h2', 'h3'):
if getattr(self, h):
d[h] = {'en': getattr(self, h + '_en'), 'fr': getattr(self, h + '_fr')}
d['document_url'] = d['url'][:d['url'].rstrip('/').rfind('/')+1]
return d
@property
@memoize_property
def name_info(self):
info = {
'post': None,
'named': True
}
if not self.member:
info['display_name'] = parsetools.r_mister.sub('', self.who)
if self.who_context:
if self.who_context in self.who:
info['display_name'] = parsetools.r_parens.sub('', info['display_name'])
info['post'] = self.who_context
else:
info['post_reminder'] = self.who_context
if self.who_hocid:
info['url'] = '/search/?q=Witness%%3A+%%22%s%%22' % self.who_hocid
else:
info['url'] = self.member.politician.get_absolute_url()
if parsetools.r_notamember.search(self.who):
info['display_name'] = self.who
if self.member.politician.name in self.who:
info['display_name'] = re.sub(r'\(.+\)', '', self.who)
info['named'] = False
elif not '(' in self.who or not parsetools.r_politicalpost.search(self.who):
info['display_name'] = self.member.politician.name
else:
post_match = re.search(r'\((.+)\)', self.who)
if post_match:
info['post'] = post_match.group(1).split(',')[0]
info['display_name'] = self.member.politician.name
return info
@staticmethod
def set_slugs(statements):
counter = defaultdict(int)
for statement in statements:
slug = slugify(statement.name_info['display_name'])[:50]
if not slug:
slug = 'procedural'
counter[slug] += 1
statement.slug = slug + '-%s' % counter[slug]
@property
def committee_name(self):
if self.document.document_type != Document.EVIDENCE:
return ''
return self.document.committeemeeting.committee.short_name
@property
def committee_slug(self):
if self.document.document_type != Document.EVIDENCE:
return ''
return self.document.committeemeeting.committee.slug
class OldSequenceMapping(models.Model):
document = models.ForeignKey(Document)
sequence = models.PositiveIntegerField()
slug = models.SlugField(max_length=100)
class Meta:
unique_together = (
('document', 'sequence')
)
def __unicode__(self):
return u"%s -> %s" % (self.sequence, self.slug)
| agpl-3.0 | -9,099,643,264,234,737,000 | 38.2282 | 126 | 0.572172 | false | 3.823508 | false | false | false |
vpelletier/neoppod | neo/scripts/simple.py | 1 | 2244 | #!/usr/bin/env python
#
# Copyright (C) 2011-2016 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import inspect, random, signal, sys
from logging import getLogger, INFO
from optparse import OptionParser
from neo.lib import logging
from neo.tests import functional
logging.backlog()
del logging.default_root_handler.handle
def main():
args, _, _, defaults = inspect.getargspec(functional.NEOCluster.__init__)
option_list = zip(args[-len(defaults):], defaults)
parser = OptionParser(usage="%prog [options] [db...]",
description="Quickly setup a simple NEO cluster for testing purpose.")
parser.add_option('--seed', help="settings like node ports/uuids and"
" cluster name are random: pass any string to initialize the RNG")
defaults = {}
for option, default in sorted(option_list):
kw = {}
if type(default) is bool:
kw['action'] = "store_true"
defaults[option] = False
elif default is not None:
defaults[option] = default
if isinstance(default, int):
kw['type'] = "int"
parser.add_option('--' + option, **kw)
parser.set_defaults(**defaults)
options, args = parser.parse_args()
if options.seed:
functional.random = random.Random(options.seed)
getLogger().setLevel(INFO)
cluster = functional.NEOCluster(args, **{x: getattr(options, x)
for x, _ in option_list})
try:
cluster.run()
logging.info("Cluster running ...")
cluster.waitAll()
finally:
cluster.stop()
if __name__ == "__main__":
main()
| gpl-2.0 | -6,943,767,685,848,343,000 | 37.033898 | 78 | 0.655526 | false | 4.10989 | false | false | false |
google/timesketch | api_client/python/timesketch_api_client/searchtemplate.py | 1 | 8044 | # Copyright 2021 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Timesketch API search template object."""
import logging
from . import error
from . import resource
from . import search
logger = logging.getLogger('timesketch_api.searchtemplate')
class SearchTemplate(resource.BaseResource):
"""Search template object. TEST e2e"""
def __init__(self, api):
"""Initialize the search template object."""
super().__init__(api, 'searchtemplate/')
self._description = ''
self._name = ''
self._resource_id = None
self._search_id = None
self._sketch_id = None
@property
def description(self):
"""Property that returns the template description."""
if self._description:
return self._description
if not self._resource_id:
logger.error('No resource ID, have you loaded the template yet?')
raise ValueError('Unable to get a name, not loaded yet.')
data = self.lazyload_data()
objects = data.get('objects')
if not objects:
return 'No description'
self._description = objects[0].get('description', 'N/A')
return self._description
def delete(self):
"""Deletes the saved graph from the store."""
if not self._resource_id:
raise ValueError(
'Unable to delete the search template, since the template '
'does not seem to be saved, which is required.')
resource_url = (
f'{self.api.api_root}/searchtemplate/{self._resource_id}/')
response = self.api.session.delete(resource_url)
return error.check_return_status(response, logger)
def from_saved(self, template_id, sketch_id=0):
"""Initialize the search template from a saved template, by ID value.
Args:
template_id: integer value for the saved search template.
sketch_id: optional integer value for a sketch ID. If not
provided, an attempt is made to figure it out.
Raises:
ValueError: If issues came up during processing.
"""
self._resource_id = template_id
self.resource_uri = f'searchtemplate/{self._resource_id}/'
if sketch_id:
self._sketch_id = sketch_id
else:
data = self.lazyload_data(refresh_cache=True)
meta = data.get('meta', {})
sketch_ids = meta.get('sketch_ids', [])
if len(sketch_ids) > 1:
sketch_string = ', '.join(sketch_ids)
raise ValueError(
'Search template has too many attached saved searches, '
'please pick one from: {0:s}'.format(sketch_string))
self._sketch_id = sketch_ids[0]
def from_search_object(self, search_obj):
"""Initialize template from a search object.
Args:
search_obj (search.Search): a search object.
"""
self._search_id = search_obj.id
self._sketch_id = search_obj.sketch.id
response = self.api.fetch_resource_data('searchtemplate/')
meta = response.get('meta', {})
template_id = 0
for data in meta.get('collection', []):
if data.get('search_id') == self._search_id:
template_id = data.get('template_id', 0)
if not template_id:
return
self._resource_id = template_id
self.resource_uri = f'searchtemplate/{self._resource_id}/'
@property
def id(self):
"""Property that returns back the search template ID."""
return self._resource_id
@property
def name(self):
"""Property that returns the template name."""
if self._name:
return self._name
if not self._resource_id:
logger.error('No resource ID, have you loaded the template yet?')
raise ValueError('Unable to get a name, not loaded yet.')
data = self.lazyload_data()
objects = data.get('objects')
if not objects:
return 'No name'
self._name = objects[0].get('name', 'N/A')
return self._name
def set_sketch(self, sketch=None, sketch_id=None):
"""Set the sketch for the search template.
Args:
sketch (sketch.Sketch): an optional sketch object to use as the
sketch object for the search template.
sketch_id (int): an optional sketch ID to use as the sketch ID
for the search template.
Raises:
ValueError: If neither a sketch nor a sketch ID is set.
"""
if not sketch and not sketch_id:
raise ValueError('Either sketch or sketch ID needs to be set.')
if sketch:
self._sketch_id = sketch.id  # store the ID, not the Sketch object, so get_sketch() in to_search() works
elif isinstance(sketch_id, int):
self._sketch_id = sketch_id
else:
raise ValueError(
'Sketch needs to be set, or an integer value for '
'a sketch ID.')
def save(self):
"""Save the search template."""
if self._resource_id:
raise ValueError(
'The template has already been saved, ATM updates to an '
'existing template are not yet supported.')
if not self._search_id:
raise ValueError(
'Unable to save the search template since the identification '
'value of the saved search is not known. The object needs '
'to be initialized from a previously saved search.')
data = {
'search_id': self._search_id,
}
resource_url = f'{self.api.api_root}/searchtemplate/'
response = self.api.session.post(resource_url, json=data)
status = error.check_return_status(response, logger)
if not status:
error.error_message(
response, 'Unable to save search as a template',
error=RuntimeError)
response_json = error.get_response_json(response, logger)
template_dict = response_json.get('objects', [{}])[0]
self._resource_id = template_dict.get('id', 0)
self.resource_uri = f'searchtemplate/{self._resource_id}/'
return f'Saved search as a template to ID: {self.id}'
def to_search(self):
"""Returns a search object from a template."""
if not self._resource_id:
raise ValueError(
'Unable to get a search object unless it is tied to a '
'template.')
if not self._sketch_id:
raise ValueError(
'Unable to get a search object unless it is tied to '
'a sketch.')
data = self.lazyload_data(refresh_cache=True)
objects = data.get('objects')
if not objects:
raise ValueError(
'Unable to get search object, issue with retrieving '
'template data.')
template_dict = objects[0]
sketch = self.api.get_sketch(self._sketch_id)
search_obj = search.Search(sketch=sketch)
search_obj.from_manual(
query_string=template_dict.get('query_string'),
query_dsl=template_dict.get('query_dsl'),
query_filter=template_dict.get('query_filter'))
search_obj.name = template_dict.get('name', 'No Name')
search_obj.description = template_dict.get(
'description', 'No Description')
return search_obj
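# Illustrative usage sketch -- the api client, template ID and sketch ID are
# assumptions for demonstration, not values defined in this module:
#
#   template = SearchTemplate(api)
#   template.from_saved(template_id=3, sketch_id=1)
#   search_obj = template.to_search()      # a search.Search bound to sketch 1
#
# Going the other way, from_search_object(search_obj) followed by save()
# publishes an existing saved search as a reusable template.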
| apache-2.0 | 374,413,808,863,143,740 | 34.59292 | 78 | 0.587892 | false | 4.271907 | false | false | false |
CuonDeveloper/cuon | cuon_server/src/cuon/Finances.py | 1 | 14964 | import random
import xmlrpclib
from twisted.web import xmlrpc
from basics import basics
import Database
class Finances(xmlrpc.XMLRPC, basics):
def __init__(self):
basics.__init__(self)
self.oDatabase = Database.Database()
self.debugFinances = 1
def getCashAccountBook(self, dicSearchfields, dicUser):
dicUser['NoWhereClient'] = 'YES'
client = dicUser['client']
dicResults = {}
sSql = " select a.designation as designation, a.document_number1 as nr1, a.document_number2 as nr2, "
sSql = sSql + " to_char(a.accounting_date, \'" + dicUser['SQLDateFormat'] + "\') as date, "
sSql = sSql + " a.account_1 as account, a.value_c1 as credit, a.value_d1 as debit "
#sSql = sSql + " account_2, value_c2, value_d2 "
#sSql = sSql + " account_3, value_c3, value_d3 "
#sSql = sSql + " account_4, value_c4, value_d4 "
sSql = sSql + "from account_sentence as a "
sSql = sSql + "where date_part('year', a.accounting_date) = " + dicSearchfields['eYear'] +" "
sSql = sSql + "and date_part('month', a.accounting_date) = " + dicSearchfields['eMonth'] +" "
sSql = sSql + self.getWhere("",dicUser,2,'a.')
sSql = sSql + " order by a.accounting_date "
self.writeLog('getCashAccountBook sSql = ' + `sSql`,self.debugFinances)
result_main = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser)
sDate_begin = dicSearchfields['eYear'] + '/' + dicSearchfields['eMonth'] + '/' + '01'
sSql = "select (sum(value_c1) - sum(value_d1)) as saldo from account_sentence"
sW = " where accounting_date < '" + sDate_begin + "' "
sSql = sSql + self.getWhere(sW, dicUser,1)
result = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser)
if result not in ['NONE','ERROR']:
if result[0]['saldo'] >= 0:
result[0]['saldo_debit'] = result[0]['saldo']
else:
result[0]['saldo_credit'] = result[0]['saldo']
fSaldo = result[0]['saldo']
self.writeLog('getCashAccountBook result_main = ' + `result_main`,self.debugFinances)
for v1 in result_main:
fSaldo = fSaldo + v1['debit'] - v1['credit']
v1['saldo'] = fSaldo
result[0]['saldo_end'] = fSaldo
dicResults['cab'] = result_main
dicResults['before'] = result
return dicResults
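# Illustrative input sketch (values invented): dicSearchfields carries the
# period as strings, e.g. {'eYear': '2012', 'eMonth': '6'}; the returned dict
# holds that month's rows under 'cab' and the carried-over saldo under 'before'.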
def xmlrpc_get_cab_doc_number1(self, dicUser):
self.writeLog('new CAB-Number for doc1')
ret = -1
cSql = "select nextval(\'numerical_cash_account_book_doc_number1" + "_client_" + `dicUser['client']` + "\') "
self.writeLog('CAB1-cSql = ' + cSql,self.debugFinances)
#context.src.logging.writeLog('User = ' + `dicUser`)
dicNumber = self.oDatabase.xmlrpc_executeNormalQuery(cSql,dicUser)
self.writeLog('dicNumber = ' + `dicNumber`)
if dicNumber:
ret = dicNumber[0]['nextval']
return ret
def xmlrpc_getLastDate(self, dicUser):
self.writeLog('start py_get_LastDate',self.debugFinances)
sSql = "select to_char(now(),'" + dicUser['SQLDateFormat'] + "\') as last_date"
result = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
if result and result not in ['NONE','ERROR']:
ret = result[0]['last_date']
cSql = "select to_char(accounting_date,'" +dicUser['SQLDateFormat'] + "\') as last_date from account_sentence "
cSql = cSql + " where id = (select max(id) as max_id from account_sentence "
self.writeLog('get0 cSql = ' + cSql,self.debugFinances)
cSql = cSql + self.getWhere("",dicUser,1)
cSql = cSql + ")"
self.writeLog('get cSql = ' + `cSql`,self.debugFinances)
#context.src.logging.writeLog('User = ' + `dicUser`)
liS = self.oDatabase.xmlrpc_executeNormalQuery(cSql,dicUser)
self.writeLog('liS = ' + `liS`,self.debugFinances)
if liS and liS not in ['NONE','ERROR']:
ret = liS[0]['last_date']
return ret
def xmlrpc_get_AccountPlanNumber(self, id, dicUser):
self.writeLog('get acctPlanNumber for ' + `id`)
ret = 'NONE'
sSql = "select name from account_plan where id = " + `id`
sSql = sSql + self.getWhere("",dicUser, 2)
self.writeLog('get AcctPlan cSql = ' + sSql)
#context.src.logging.writeLog('User = ' + `dicUser`)
liAcct = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
self.writeLog('liAcct = ' + `liAcct`)
if liAcct not in ['NONE','ERROR']:
ret = liAcct[0]['name']
return ret
def xmlrpc_get_iAcct(self,iAcct, dicUser):
ret = 'NONE'
liAcct = None
if iAcct and iAcct not in ['NONE','ERROR']:
cSql = "select designation from account_info where id = " + `iAcct`
#self.writeLog('acct SQL ' + `sAcct` + ', ' + `cSql`)
cSql = cSql + self.getWhere("",dicUser,2)
#self.writeLog('get Acct cSql = ' + cSql)
#context.src.logging.writeLog('User = ' + `dicUser`)
liAcct = self.oDatabase.xmlrpc_executeNormalQuery(cSql,dicUser)
self.writeLog('liAcct = ' + `liAcct`)
if liAcct and liAcct not in ['NONE','ERROR']:
ret = liAcct[0]['designation']
return ret
def xmlrpc_get_acct(self,sAcct, dicUser):
self.writeLog('new acct Info for ' + `sAcct`)
ret = 'NONE'
liAcct = None
if sAcct and sAcct not in ['NONE','ERROR']:
cSql = "select designation from account_info where account_number = '" + sAcct + "'"
self.writeLog('acct SQL ' + `sAcct` + ', ' + `cSql`)
cSql = cSql + self.getWhere("",dicUser,2)
self.writeLog('get Acct cSql = ' + cSql)
#context.src.logging.writeLog('User = ' + `dicUser`)
liAcct = self.oDatabase.xmlrpc_executeNormalQuery(cSql,dicUser)
self.writeLog('liAcct = ' + `liAcct`)
if liAcct and liAcct not in ['NONE','ERROR']:
ret = liAcct[0]['designation']
return ret
def xmlrpc_getAcctID(self, sAcct, dicUser):
ret = 0
liAcct = None
if sAcct and sAcct not in ['NONE','ERROR']:
cSql = "select id from account_info where account_number = '" + sAcct + "'"
#self.writeLog('acct SQL ' + `sAcct` + ', ' + `cSql`)
cSql = cSql + self.getWhere("",dicUser,2)
#self.writeLog('get Acct cSql = ' + cSql)
#context.src.logging.writeLog('User = ' + `dicUser`)
liAcct = self.oDatabase.xmlrpc_executeNormalQuery(cSql,dicUser)
#self.writeLog('liAcct = ' + `liAcct`)
if liAcct and liAcct not in ['NONE','ERROR']:
ret = liAcct[0]['id']
return ret
def xmlrpc_get_cabShortKeyValues(self, s, dicUser):
self.writeLog('start py_get_cabShortKeyValues')
ret = -1
cSql = "select max(id) as max_id from account_sentence where short_key = '" + s + "'"
self.writeLog('get0 cSql = ' + cSql)
cSql = cSql + self.getWhere("",dicUser,1)
self.writeLog('get cSql = ' + cSql)
#context.src.logging.writeLog('User = ' + `dicUser`)
liS = self.oDatabase.xmlrpc_executeNormalQuery(cSql,dicUser)
self.writeLog('liS = ' + `liS`)
if liS not in ['NONE','ERROR']:
ret = liS[0]['max_id']
return ret
def xmlrpc_get_cab_designation(self, id, dicUser):
ret = 'NONE'
cSql = "select designation from account_sentence where id = " + `id`
cSql = cSql + self.getWhere("",dicUser,1)
self.writeLog('get cSql = ' + cSql)
#context.src.logging.writeLog('User = ' + `dicUser`)
liS = self.oDatabase.xmlrpc_executeNormalQuery(cSql,dicUser)
self.writeLog('liS = ' + `liS`)
if liS not in ['NONE','ERROR']:
ret = liS[0]['designation']
return ret
def xmlrpc_get_cab_doc_number1(self, dicUser):
self.writeLog('new CAB-Number for doc1')
ret = -1
cSql = "select nextval(\'numerical_cash_account_book_doc_number1" + "_client_" + `dicUser['client']` + "\') "
self.writeLog('CAB1-cSql = ' + cSql)
#context.src.logging.writeLog('User = ' + `dicUser`)
dicNumber = self.oDatabase.xmlrpc_executeNormalQuery(cSql,dicUser)
self.writeLog('dicNumber = ' + `dicNumber`)
if dicNumber not in ['NONE','ERROR']:
ret = dicNumber[0]['nextval']
return ret
def xmlrpc_updateAccountInfo(self, dicAcct, dicUser):
self.writeLog('Search for account_Number ' )
sSql = "select id from account_plan where name = '" + dicAcct['account_plan_number'][0] + "'"
sSql = sSql + self.getWhere("",dicUser,2)
result = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser)
pn = 'NONE'
if result not in ['NONE','ERROR'] and result[0].has_key('id'):
dicAcct['account_plan_number'] = [result[0]['id'], 'int']
pn = result[0]['id']
print 'pn = ', pn
if pn not in ['NONE','ERROR']:
sSql = "select id from account_info where account_number = '" + dicAcct['account_number'][0] + "' and account_plan_number = " + `pn`
sSql = sSql + self.getWhere("",dicUser,2)
self.writeLog('Search for account_Number sSql = ' + `sSql` )
result = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser)
self.writeLog('result id by finances = ' + `result`)
if result not in ['NONE','ERROR']:
id = result[0]['id']
else:
id = -1
dicAcct['client'] = [dicUser['client'],'int']
result = self.oDatabase.xmlrpc_saveRecord('account_info',id, dicAcct, dicUser)
self.writeLog('dicAcct = ' + `dicAcct`)
return result
def xmlrpc_getTotalAmount(self, order_id, dicUser):
total_amount = 0
sSql = " select total_amount from list_of_invoices where order_number = " + `order_id`
sSql += self.getWhere(None,dicUser,2)
result = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
if result and result not in ['NONE','ERROR']:
for i in result:
total_amount += i['total_amount']
#total_amount = ("%." + `self.CURRENCY_ROUND` + "f") % round(total_amount,self.CURRENCY_ROUND)
return total_amount
def xmlrpc_getTotalAmountString(self, OrderID, dicUser):
retValue = '0'
total_sum = self.xmlrpc_getTotalAmount(OrderID,dicUser)
try:
#"%.2f"%y
total_sum = ("%." + `self.CURRENCY_ROUND` + "f") % round(total_sum,self.CURRENCY_ROUND)
retValue = total_sum + ' ' + self.CURRENCY_SIGN
except:
pass
#print "Amount of invoice = ", retValue
return retValue
def xmlrpc_getTotalInpayment(self, order_id, dicUser):
total_amount = 0
sSql = " select sum(inpayment) as sum_inpayment from in_payment where order_id = " + `order_id`
sSql += self.getWhere(None,dicUser,2)
result = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
if result and result not in ['NONE','ERROR']:
print "result inpayment", result
total_amount = result[0]['sum_inpayment']
#total_amount = ("%." + `self.CURRENCY_ROUND` + "f") % round(total_amount,self.CURRENCY_ROUND)
return total_amount
def xmlrpc_getTotalInpaymentString(self, OrderID, dicUser):
retValue = '0'
total_sum = self.xmlrpc_getTotalInpayment(OrderID,dicUser)
try:
#"%.2f"%y
total_sum = ("%." + `self.CURRENCY_ROUND` + "f") % round(total_sum,self.CURRENCY_ROUND)
retValue = total_sum + ' ' + self.CURRENCY_SIGN
except:
pass
#print "Amount of invoice = ", retValue
return retValue
def xmlrpc_getTotalDiscount(self, order_id, dicUser):
total_amount = 0
sSql = " select cash_discount from in_payment where order_id = " + `order_id`
sSql += self.getWhere(None,dicUser,2)
result = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
if result and result not in ['NONE','ERROR']:
for i in result:
total_amount += i['cash_discount']
#total_amount = ("%." + `self.CURRENCY_ROUND` + "f") % round(total_amount,self.CURRENCY_ROUND)
return total_amount
def getDiscountSumString(self, OrderID, dicUser):
retValue = '0'
try:
total_sum = self.xmlrpc_getTotalDiscount(OrderID,dicUser)
#"%.2f"%y
total_sum = ("%." + `self.CURRENCY_ROUND` + "f") % round(total_sum,self.CURRENCY_ROUND)
retValue = total_sum + ' ' + self.CURRENCY_SIGN
except:
pass
return retValue
def xmlrpc_getResidueSumString(self, OrderID, dicUser):
retValue = '0'
print "getResidueSumString startet"
try:
inpayment = self.xmlrpc_getTotalInpayment(OrderID, dicUser)
print "inp", inpayment
total_amount = self.xmlrpc_getTotalAmount(OrderID,dicUser)
print "ta", total_amount
discount = self.xmlrpc_getTotalDiscount(OrderID,dicUser)
print "dis", discount
#print inpayment, total_amount, discount
#"%.2f"%y
total_sum = total_amount - inpayment - discount
total_sum = ("%." + `self.CURRENCY_ROUND` + "f") % round(total_sum,self.CURRENCY_ROUND)
retValue = total_sum + ' ' + self.CURRENCY_SIGN
except Exception, params:
print Exception, params
print "Residue", retValue
return retValue
def xmlrpc_createTicketFromInpayment(self, inpayment_id, dicUser):
ret = True
return ret
def xmlrpc_createTicketFromInvoice(self, invoice_id, dicUser):
ret = True
print 'new ticket'
sSql = "select orb.id, orb.discount, orb.packing_cost, orb.postage_cost, orb.misc_cost, "
sSql += "inv.id "
sSql += "from orderbook as orb, list_of_invoices as inv "
sSql += "where orb.id = inv.order_number "
sSql += self.getWhere('', dicUser, 2, 'inv.')
result = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser)
return ret
| gpl-3.0 | 4,789,738,053,648,609,000 | 41.754286 | 145 | 0.559276 | false | 3.497896 | false | false | false |
MapsPy/MapsPy | maps_monitor.py | 1 | 10537 | '''
Created on Jun 6, 2013
@author: Mirna Lerotic, 2nd Look Consulting
http://www.2ndlookconsulting.com/
Copyright (c) 2013, Stefan Vogt, Argonne National Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the Argonne National Laboratory nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
'''
import os
import sys
import shutil
from time import gmtime, strftime
import time
import platform
import Settings
import maps_batch
import traceback
settings_filename = 'settings.ini'
#-----------------------------------------------------------------------------------------------------
def check_for_alias(directory_str, alias_dict):
ret_str = directory_str
for key in alias_dict.iterkeys():
if directory_str.startswith(key):
ret_str = directory_str.replace(key, alias_dict[key])
break
return ret_str
#-----------------------------------------------------------------------------------------------------
def parse_aliases(alias_str):
all_aliases = alias_str.split(';')
alias_dict = dict()
for single_set in all_aliases:
split_single = single_set.split(',')
if len(split_single) > 1:
alias_dict[split_single[0]] = split_single[1]
return alias_dict
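# Illustrative sketch (paths invented): the directory alias setting is a
# ';'-separated list of 'prefix,replacement' pairs, e.g.
#   parse_aliases('/mnt/beamline,/data/beamline;X:/jobs,/local/jobs')
#   -> {'/mnt/beamline': '/data/beamline', 'X:/jobs': '/local/jobs'}
# check_for_alias() then rewrites any job directory starting with a known prefix.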
#-----------------------------------------------------------------------------------------------------
def main(mySettings):
jobs_path = mySettings[Settings.MONITOR_JOBS_PATH]
processing_path = mySettings[Settings.MONITOR_PROCESSING_PATH]
info_path = mySettings[Settings.MONITOR_FINISHED_INFO_PATH]
done_path = mySettings[Settings.MONITOR_DONE_PATH]
computer = mySettings[Settings.MONITOR_COMPUTER_NAME]
check_interval = int(mySettings[Settings.MONITOR_CHECK_INTERVAL])
alias_dict = parse_aliases(mySettings[Settings.MONITOR_DIR_ALIAS])
working_dir = os.getcwd()
#todo: create folders if they don't exist
#os.chdir(jobs_path)
print 'Starting maps_monitor with'
print 'jobs_path = ',jobs_path
print 'processing_path = ',processing_path
print 'finished_info_path = ',info_path
print 'done_path = ',done_path
print 'computer name = ',computer
print 'directory aliases = ',alias_dict
print 'checking every ',check_interval,'seconds'
#print 'changed into ', jobs_path
#make sure the following are defined:
keyword_a = 0
keyword_b = 0
keyword_c = 0
keyword_d = 0
keyword_e = 0
keyword_f = 0
statusfile = 'status_'+computer
print 'changed into ', jobs_path
print strftime("%Y-%m-%d %H:%M:%S", gmtime())
true = 1
while true:
filenames = []
try:
os.chdir(jobs_path)
dirList=os.listdir(jobs_path)
for fname in dirList:
if (fname[0:4] == 'job_') and (fname[-4:] == '.txt') :
filenames.append(fname)
except:
print 'error changing dir'
time.sleep(5)
no_files =len(filenames)
if no_files == 0 :
#time.sleep(300.0)
time.sleep(check_interval)
print 'no jobs found, waiting ...'
print strftime("%Y-%m-%d %H:%M:%S", gmtime())
f = open(statusfile+'_idle.txt', 'w')
f.write(strftime("%Y-%m-%d %H:%M:%S", gmtime())+'\n')
f.close()
continue
if no_files > 0 :
try:
os.remove(statusfile+'_idle.txt')
except:
pass
time_started = strftime("%Y-%m-%d %H:%M:%S", gmtime())
version = 0
total_number_detectors = 1
max_no_processors_files = 1
max_no_processors_lines = 1
write_hdf = 0
quick_dirty = 0
xrf_bin = 0
nnls = 0
xanes_scan = 0
detector_to_start_with = 0
#default beamline to use for now is 2-id-e , we will change this in the future
beamline = '2-ID-E'
print 'found a job waiting, in file: ', filenames[0]
print 'read data file'
f = open(statusfile+'_working.txt', 'w')
f.write(strftime("%Y-%m-%d %H:%M:%S", gmtime())+'\n')
f.write('found a job waiting, in file: '+ filenames[0]+'\n')
f.close()
time.sleep(5)
standard_filenames = []
try:
f = open(filenames[0], 'rt')
for line in f:
if ':' in line :
slist = line.split(':')
tag = slist[0]
value = ':'.join(slist[1:])
if tag == 'DIRECTORY': directory = value.strip()
elif tag == 'A' : keyword_a = int(value)
elif tag == 'B' : keyword_b = int(value)
elif tag == 'C' : keyword_c = int(value)
elif tag == 'D' : keyword_d = int(value)
elif tag == 'E' : keyword_e = int(value)
elif tag == 'F' : keyword_f = int(value)
elif tag == 'DETECTOR_ELEMENTS' : total_number_detectors = int(value)
elif tag == 'MAX_NUMBER_OF_FILES_TO_PROCESS' : max_no_processors_files = int(value)
elif tag == 'MAX_NUMBER_OF_LINES_TO_PROCESS' : max_no_processors_lines = int(value)
elif tag == 'QUICK_DIRTY' : quick_dirty = int(value)
elif tag == 'XRF_BIN' : xrf_bin = int(value)
elif tag == 'NNLS' : nnls = int(value)
elif tag == 'XANES_SCAN' : xanes_scan = int(value)
elif tag == 'DETECTOR_TO_START_WITH' : detector_to_start_with = int(value)
elif tag == 'BEAMLINE' : beamline = str(value).strip()
elif tag == 'STANDARD' : standard_filenames.append(str(value).strip())
f.close()
except: print 'Could not read file: ', filenames[0]
directory = check_for_alias(directory, alias_dict)
print 'move job into processing directory'
shutil.copy(filenames[0], os.path.join(processing_path, filenames[0]))
os.remove(filenames[0])
if keyword_f == 1:
keyword_a = 1
keyword_b = 1
keyword_c = 1
keyword_d = 1
keyword_e = 1
print 'now moving into directory to analyse ', directory
os.chdir(directory)
f = open('maps_settings.txt', 'w')
f.write(' This file will set some MAPS settings mostly to do with fitting'+'\n')
f.write('VERSION:' + str(version).strip()+'\n')
f.write('DETECTOR_ELEMENTS:' + str(total_number_detectors).strip()+'\n')
f.write('MAX_NUMBER_OF_FILES_TO_PROCESS:' + str(max_no_processors_files).strip()+'\n')
f.write('MAX_NUMBER_OF_LINES_TO_PROCESS:' + str(max_no_processors_lines).strip()+'\n')
f.write('QUICK_DIRTY:' + str(quick_dirty).strip()+'\n')
f.write('XRF_BIN:' + str(xrf_bin).strip()+'\n')
f.write('NNLS:' + str(nnls).strip()+'\n')
f.write('XANES_SCAN:' + str(xanes_scan).strip()+'\n')
f.write('DETECTOR_TO_START_WITH:' + str(detector_to_start_with).strip()+'\n')
f.write('BEAMLINE:' + beamline.strip()+'\n')
for item in standard_filenames:
f.write('STANDARD:' + item.strip()+'\n')
f.close()
os.chdir(working_dir)
try:
maps_batch.main(wdir=directory, a=keyword_a, b=keyword_b, c=keyword_c, d=keyword_d, e=keyword_e)
except:
print 'Error processing',directory
traceback.print_exc(file=sys.stdout)
os.chdir(processing_path)
print 'move job into done directory'
shutil.copy(os.path.join(processing_path,filenames[0]), os.path.join(done_path,filenames[0]))
os.remove(filenames[0])
os.chdir(info_path)
f = open('finished_'+filenames[0], 'w')
f.write( 'time started: ' + time_started+'\n')
f.write( 'time finished: '+ strftime("%Y-%m-%d %H:%M:%S", gmtime())+'\n')
f.write( 'computer that did analysis '+ computer+'\n')
f.write( '--------------------------------------'+'\n')
f.write( '\n')
f.write( '\n')
f.write( '\n')
f.write( 'used the following settings'+'\n')
f.write('VERSION:' + str(version).strip()+'\n')
f.write( 'A:'+ str(keyword_a).strip()+'\n')
f.write( 'B:'+ str(keyword_b).strip()+'\n')
f.write( 'C:'+ str(keyword_c).strip()+'\n')
f.write( 'D:'+ str(keyword_d).strip()+'\n')
f.write( 'E:'+ str(keyword_e).strip()+'\n')
f.write( 'F:'+ str(keyword_f).strip()+'\n')
f.write('DETECTOR_ELEMENTS:' + str(total_number_detectors).strip()+'\n')
f.write('MAX_NUMBER_OF_FILES_TO_PROCESS:' + str(max_no_processors_files).strip()+'\n')
f.write('MAX_NUMBER_OF_LINES_TO_PROCESS:' + str(max_no_processors_lines).strip()+'\n')
f.write('QUICK_DIRTY:' + str(quick_dirty).strip()+'\n')
f.write('XRF_BIN:' + str(xrf_bin).strip()+'\n')
f.write('NNLS:' + str(nnls).strip()+'\n')
f.write('XANES_SCAN:' + str(xanes_scan).strip()+'\n')
f.write('DETECTOR_TO_START_WITH:' + str(detector_to_start_with).strip()+'\n')
f.close()
os.chdir(jobs_path)
os.remove(statusfile+'_working.txt')
return
#-----------------------------------------------------------------------------
if __name__ == '__main__':
if len(sys.argv) > 1:
settings_filename = sys.argv[1]
settings = Settings.SettingsIO()
settings.load(settings_filename)
if settings.checkSectionKeys(Settings.SECTION_MONITOR, Settings.MONITOR_KEYS) == False:
print 'Error: Could not find all settings in ',settings_filename
print 'Please add the following keys to',settings_filename,'under the section',Settings.SECTION_MONITOR
for key in Settings.MONITOR_KEYS:
print key
sys.exit(1)
monitorSettings = settings.getSetting(Settings.SECTION_MONITOR)
#computer_name = str(platform.node())
main(monitorSettings)
| bsd-2-clause | 696,451,433,837,313,400 | 35.102113 | 105 | 0.61564 | false | 3.146312 | false | false | false |
agateau/qyok | queries.py | 1 | 5142 | # -*- coding: UTF-8 -*-
from datetime import datetime
from operator import attrgetter
from PyQt4.QtCore import QCoreApplication
from sqlobject import AND, OR, LIKE, IN
from sqlobject.sqlbuilder import Select
import yaml
from yokadi.db import Task, Project
def formatDate(date):
"""
strftime may return a string with accent ("August" in fr is "Août" for
example), so we need to turn it into proper unicode.
"""
return unicode(date.strftime("%A %d %B %Y"), "utf-8")
def __tr(txt):
return QCoreApplication.translate("", txt)
def formatDueDate(dueDate):
today = datetime.now()
remaining = (dueDate.date() - today.date()).days
if remaining < 0:
txt = __tr("%1 days overdue").arg(-remaining)
elif remaining == 0:
txt = __tr("Due today")
elif remaining == 1:
txt = __tr("Due tomorrow")
elif remaining < 7:
txt = __tr("%1 days left").arg(remaining)
else:
txt = __tr("%1 weeks left").arg(remaining / 7)
return unicode(txt)
def dueDateCssClass(task):
done = task.status == "done"
if done:
refDate = task.doneDate
else:
refDate = datetime.now()
remaining = (task.dueDate.date() - refDate.date()).days
if done:
if remaining < 0:
return "due-date-overdue"
else:
return ""
if remaining < 0:
return "due-date-overdue"
elif remaining == 0:
return "due-date-today"
elif remaining < 7:
return "due-date-week"
else:
return ""
class Item(object):
__slots__ = ["task", "isNote", "grp1", "grp2", "keywords"]
def __init__(self, task):
self.task = task
keywordDict = task.getKeywordDict()
self.isNote = "_note" in keywordDict
self.keywords = [k for k in keywordDict if k[0] != '_']
class Query(object):
__slots__ = ["name", "defaultProjectName", "defaultKeywordFilters", "projectName", "keywordFilters", "_filters"]
def __init__(self, name):
self.name = name
self.projectName = None
self.defaultProjectName = None
self.keywordFilters = []
self.defaultKeywordFilters = []
self._filters = []
def _addProjectFilter(self):
if self.projectName is None:
return
self._filters.append(
IN(
Task.q.project,
Select(Project.q.id, LIKE(Project.q.name, "%" + self.projectName + "%"))
))
def run(self):
self._filters = []
self._addProjectFilter()
for kwFilter in self.keywordFilters:
self._filters.append(kwFilter.filter())
class DueQuery(Query):
templateName = "index.html"
def __init__(self):
super(DueQuery, self).__init__("Due")
def run(self):
super(DueQuery, self).run()
self._filters.append(OR(Task.q.status == "new", Task.q.status == "started"))
self._filters.append(Task.q.dueDate != None)
tasks = Task.select(AND(*self._filters))
lst = []
for task in tasks:
item = Item(task)
item.grp1 = task.dueDate.date()
item.grp2 = task.project.name
lst.append(item)
fmt1 = formatDate
return dict(lst=lst, fmt1=fmt1)
class ProjectQuery(Query):
templateName = "index.html"
def run(self):
super(ProjectQuery, self).run()
self._filters.append(OR(Task.q.status == "new", Task.q.status == "started"))
tasks = Task.select(AND(*self._filters))
lst = []
for task in tasks:
item = Item(task)
item.grp1 = task.project.name
item.grp2 = ""
lst.append(item)
# Show notes at the end
lst.sort(key=attrgetter("isNote"))
fmt1 = lambda x: x
return dict(lst=lst, fmt1=fmt1)
class DoneQuery(Query):
templateName = "done.html"
__slots__ = ["minDate", "maxDate"]
def __init__(self):
super(DoneQuery, self).__init__("Done")
self.minDate = None
self.maxDate = None
def run(self):
super(DoneQuery, self).run()
self._filters.append(Task.q.status == "done")
if self.minDate is not None:
self._filters.append(Task.q.doneDate >= self.minDate)
if self.maxDate is not None:
self._filters.append(Task.q.doneDate < self.maxDate)
if self.minDate is None and self.maxDate is None:
self._filters.append(Task.q.doneDate != None)
tasks = Task.select(AND(*self._filters))
lst = []
for task in tasks:
item = Item(task)
item.grp1 = task.doneDate.date()
item.grp2 = task.project.name
lst.append(item)
fmt1 = formatDate
return dict(lst=lst, fmt1=fmt1)
def loadProjectQueries(fileName):
def queryFromDict(dct):
query = ProjectQuery(dct["name"])
query.defaultProjectName = dct.get("project_filter")
query.defaultKeywordFilters = dct.get("keyword_filters", [])
return query
lst = yaml.load(open(fileName))
return [queryFromDict(x) for x in lst]
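# Illustrative sketch of the YAML layout loadProjectQueries() expects; the
# names and filters are invented examples, not shipped defaults:
#
#   - name: Work
#     project_filter: acme
#     keyword_filters: [urgent]
#   - name: Home
#     project_filter: home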
| gpl-3.0 | -5,127,149,142,014,772,000 | 28.210227 | 116 | 0.576347 | false | 3.682665 | false | false | false |
Djabx/mgd | mgdpck/mgd_main.py | 1 | 8718 | #! /usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import logging
import operator
import itertools
import webbrowser
from mgdpck import logging_util
# init logger first
logging_util.init_logger()
logging_util.add_except_name('run_script')
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
from mgdpck import model
from mgdpck import actions
from mgdpck import data_access
# init of all readers
from mgdpck.readers import *
from mgdpck.writters import *
def _get_parser_sync_level(parser):
group = parser.add_argument_group('sync level')
group.add_argument('-sm', '--meta',
action='store_true', dest='sync_meta',
help='Sync and update meta data (list of books, etc.)')
group.add_argument('-ss', '--struct',
action='store_true', dest='sync_struct', default=False,
help='Sync structures of followed books (chapters, page structure etc.)')
group.add_argument('-si', '--images',
action='store_true', dest='sync_images', default=False,
help='Sync all images')
group.add_argument('-sa', '--all',
action='store_true', dest='sync_all', default=False,
help='Sync meta data, structures and images; equal to -sm -ss -si (default: True with action "follow" or "export")')
group.add_argument('-sn', '--none',
action='store_true', dest='sync_none', default=False,
help='Do not sync anything, disable -sa / -ss / -sm / -si (default: True with others actions than "follow" or "export")')
def _get_parser_selection(parser):
group = parser.add_argument_group('selection')
group.add_argument('-a', '--all-books',
dest='all_books', action='store_true',
help='Selection all books followed.')
group.add_argument('-b', '--book-name',
dest='book_name',
help='Selection of books with the given name (use %% for any)')
group.add_argument('-s', '--site-name',
dest='site_name',
help='Selection of book from the given site (use %% for any)')
group.add_argument('-i', '--book-id',
dest='book_id',
help='Selection of book with the given id.')
group.add_argument('-sc', '--start-chapter',
dest='chapter_start', type=int,
help='The chapter to start with (included). (only with -f or no actions)')
group.add_argument('-ec', '--end-chapter',
dest='chapter_end', type=int,
help='The chapter to end with (included); even if newer chapters appears, we will skip them (only with -f or no actions)')
def _get_parser_actions(parser):
group_ex = parser.add_mutually_exclusive_group()
group_ex.add_argument('--site',
dest='list_site', action='store_true',
help='Liste all known site with their id (disable sync operations).')
group_ex.add_argument('-l', '--list',
dest='list_book', action='store_true',
help='List all know book (disable sync operations)')
group_ex.add_argument('-lf', '--list-followed',
dest='list_followed_book', action='store_true',
help='List followed book (disable sync operations)')
group_ex.add_argument('-f', '--follow',
dest='follow', action='store_true',
help='Mark as follow every book found')
group_ex.add_argument('-u', '--unfollow',
dest='unfollow', action='store_true',
help='Mark as unfollow every book found. (Disable sync operations)')
group_ex.add_argument('-d', '--delete',
dest='delete_book', action='store_true',
help='Delete every book found. (Disable sync operations)')
group_ex.add_argument('-w', '--web',
dest='web', action='store_true',
help='Open web browser on it. (Disable sync operations)')
group_exp = parser.add_mutually_exclusive_group()
for w in sorted(actions.REG_WRITTER.values(), key=operator.methodcaller('get_name')):
group_exp.add_argument('--{}'.format(w.get_name()),
dest='exporter', action='store_const',
const=w,
help='Export as "{}".'.format(w.get_name()))
default_output = os.path.join(os.path.abspath('.'), 'export_output')
parser.add_argument('-o', '--output-dir',
dest='output', action='store',
help='The output directory path during export. (default to: "{}")'.format(default_output),
default=default_output)
def get_parser():
parser = argparse.ArgumentParser(prog='mgd', conflict_handler='resolve')
default_store = os.path.join(os.path.abspath('.'), model.DEFAULT_FILE_DB_NAME)
parser.add_argument('--data',
dest='data_store', action='store',
help='the output where to store all data (default to: "{}")'.format(default_store),
default=default_store)
parser.add_argument('-v', '--verbose',
dest='verbose', action='store_true',
help='Enable verbose output'.format(default_store),
default=False)
_get_parser_sync_level(parser)
_get_parser_selection(parser)
_get_parser_actions(parser)
return parser
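# Illustrative invocation sketch (book pattern and writer flag are invented;
# the real --<writer> flags come from the registered writters at runtime):
#   python mgd_main.py -b 'one piece%' -f -sa --cbz -o ./export_output
# i.e. follow every matching book, sync meta/structure/images and export it.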
def init_default_data_store(args):
sm = model.StoreManager(args.data_store, default=True)
sm.create_db()
return sm
def _find_books(args, s):
lsbs = []
if args.all_books:
lsbs = data_access.find_books_followed(s)
elif args.book_name or args.site_name:
lsbs = data_access.search_book(args.book_name, args.site_name, s)
elif args.book_id:
lsbs = [data_access.find_link_with_id(args.book_id, s)]
return lsbs
def _update_chapter_info(lsbs, args, s):
if args.chapter_start or args.chapter_end:
for lsb in lsbs:
actions.update_one_book_chapters(lsb.id, s)
s.commit()
# we do the search again for updating result
r = data_access.find_link_with_id(lsb.id, s)
if args.chapter_start:
r.min_chapter = args.chapter_start
if args.chapter_end:
r.max_chapter = args.chapter_end
def _make_actions(args, s):
if args.list_site:
for si in data_access.find_all_site(s):
print_site(si)
elif args.list_book:
for lsb in data_access.find_books(s):
print_lsb(lsb, s)
elif args.list_followed_book:
for lsb in data_access.find_books_followed(s):
print_lsb(lsb, s)
elif args.follow:
print('Following book')
lsbs = _find_books(args, s)
for lsb in lsbs:
print_lsb(lsb, s)
lsb.followed = True
_update_chapter_info(lsbs, args, s)
s.commit()
elif args.unfollow:
print('Unfollowing book')
lsbs = _find_books(args, s)
for lsb in lsbs:
print_lsb(lsb, s)
lsb.followed = False
s.commit()
elif args.delete_book:
print('Deleting book')
for lsb in _find_books(args, s):
print_lsb(lsb, s)
actions.delete_book(lsb, s)
s.commit()
elif args.web:
for lsb in _find_books(args, s):
webbrowser.open(lsb.url)
else:
lsbs = _find_books(args, s)
for lsb in lsbs:
print_lsb(lsb, s)
_update_chapter_info(lsbs, args, s)
def handle_default(parser, args):
logger.debug('out default')
sm = init_default_data_store(args)
with sm.session_scope() as s:
actions.create_all_site(s)
s.commit()
if args.follow or args.exporter is not None:
args.sync_struct = True
args.sync_images = True
if not args.sync_none and (args.sync_all or args.sync_meta):
logger.info('update all books')
actions.update_books_all_site(s)
s.commit()
_make_actions(args, s)
if not args.sync_none and (args.sync_all or args.sync_struct):
logger.info('update chapters')
actions.update_all_chapters(s)
s.commit()
logger.info('update pages')
actions.update_all_pages(s)
s.commit()
if not args.sync_none and (args.sync_all or args.sync_images):
logger.info('update all images')
# /!\ we use sm and not s because we have threads after this
# data are commited after the update
actions.update_all_images(sm)
if args.exporter:
lsbs = _find_books(args, s)
if len(lsbs) > 0:
actions.export_book(args.exporter, args.output, lsbs, args.chapter_start, args.chapter_end, s)
def print_lsb(lsb, s):
print('{0.id:>6} {1} '.format(lsb, '+' if lsb.followed else ' '), end='')
sys.stdout.buffer.write(lsb.book.short_name.encode('utf8'))
print(' on {0.site.name}'.format(lsb))
if data_access.count_book_chapters(lsb, s) > 0:
print('\tchapters: {0:0>3} - {1:0>3} [{2}, {3}]'.format(
lsb.chapters[0].num,
lsb.chapters[-1].num,
lsb.min_chapter if lsb.min_chapter is not None else 1,
lsb.max_chapter if lsb.max_chapter is not None else '*'
))
def print_site(si):
print('Site: "{0.name}" @ {0.hostname}'.format(si))
def main():
parser = get_parser()
args = parser.parse_args()
if not hasattr(args, 'func'):
args.func = handle_default
if args.verbose:
logging_util.make_verbose()
import pprint
logger.debug('arguments: %s', pprint.pformat(args))
args.func(parser, args)
| apache-2.0 | -7,672,336,189,648,278,000 | 28.958763 | 126 | 0.652099 | false | 3.28238 | false | false | false |
textbook/atmdb | atmdb/utils.py | 1 | 4517 | """Utilities for working with TMDb models."""
async def overlapping_movies(people, client=None):
"""Find movies that the same people have been in.
Arguments:
people (:py:class:`collections.abc.Sequence`): The
:py:class:`~.Person` objects to find overlapping movies for.
client (:py:class:`~.TMDbClient`, optional): The TMDb client
to extract additional information about the overlap.
Returns:
:py:class:`list`: The relevant :py:class:`~.Movie` objects.
"""
return await _overlap(people, 'movie_credits', client, 'get_movie')
async def overlapping_actors(movies, client=None):
"""Find actors that appear in the same movies.
Arguments:
movies (:py:class:`collections.abc.Sequence`): The
:py:class:`~.Movie` objects to find overlapping actors for.
client (:py:class:`~.TMDbClient`, optional): The TMDb client
to extract additional information about the overlap.
Returns:
:py:class:`list`: The relevant :py:class:`~.Person` objects.
"""
return await _overlap(movies, 'cast', client, 'get_person')
async def find_overlapping_movies(names, client):
"""Find movies that the same people have been in.
Warning:
This function requires two API calls per name submitted, plus
one API call per overlapping movie in the result; it is therefore
relatively slow.
Arguments:
names (:py:class:`collections.abc.Sequence`): The names of the
people to find overlapping movies for.
client (:py:class:`~.TMDbClient`): The TMDb client.
Returns:
:py:class:`list`: The relevant :py:class:`~.Movie` objects.
"""
return await _find_overlap(names, client, 'find_person', 'get_person',
overlapping_movies)
async def find_overlapping_actors(titles, client):
"""Find actors that have been in the same movies.
Warning:
This function requires two API calls per title submitted, plus
one API call per overlapping person in the result; it is therefore
relatively slow.
Arguments:
titles (:py:class:`collections.abc.Sequence`): The titles of the
movies to find overlapping actors for.
client (:py:class:`~.TMDbClient`): The TMDb client.
Returns:
:py:class:`list`: The relevant :py:class:`~.Person` objects.
"""
return await _find_overlap(titles, client, 'find_movie', 'get_movie',
overlapping_actors)
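# Illustrative usage sketch -- the client construction and the actor names are
# assumptions for demonstration, not part of this module:
#
#   import asyncio
#   from atmdb import TMDbClient
#
#   async def demo():
#       client = TMDbClient(api_token='<your token>')
#       movies = await find_overlapping_movies(['Some Actor', 'Other Actor'], client)
#       return [movie.title for movie in movies]
#
#   asyncio.get_event_loop().run_until_complete(demo())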
async def _overlap(items, overlap_attr, client=None, get_method=None):
"""Generic overlap implementation.
Arguments:
item (:py:class:`collections.abc.Sequence`): The objects to
find overlaps for.
overlap_attr (:py:class:`str`): The attribute of the items to use
as input for the overlap.
client (:py:class:`~.TMDbClient`, optional): The TMDb client
to extract additional information about the overlap.
get_method (:py:class:`str`, optional): The method of the
client to use for extracting additional information.
Returns:
:py:class:`list`: The relevant result objects.
"""
overlap = set.intersection(*(getattr(item, overlap_attr) for item in items))
if client is None or get_method is None:
return overlap
results = []
for item in overlap:
result = await getattr(client, get_method)(id_=item.id_)
results.append(result)
return results
async def _find_overlap(queries, client, find_method, get_method,
overlap_function):
"""Generic find and overlap implementation.
Arguments:
queries (:py:class:`collections.abc.Sequence`): The queries to find
overlaps for.
client (:py:class:`~.TMDbClient`): The TMDb client.
find_method (:py:class:`str`): The name of the client method to
use for finding candidates.
get_method (:py:class:`str`): The name of the client method to
use for getting detailed information on a candidate.
overlap_function (:py:class:`collections.abc.Callable`): The
function to call for the resulting overlap.
"""
results = []
for query in queries:
candidates = await getattr(client, find_method)(query)
if not candidates:
raise ValueError('no result found for {!r}'.format(query))
result = await getattr(client, get_method)(id_=candidates[0].id_)
results.append(result)
return await overlap_function(results, client)
| isc | 4,149,155,471,548,165,600 | 34.566929 | 80 | 0.654195 | false | 4.277462 | false | false | false |
landscapeio/prospector | prospector2/formatters/text.py | 1 | 3108 | from prospector2.formatters.base import Formatter
__all__ = (
'TextFormatter',
)
# pylint: disable=unnecessary-lambda
class TextFormatter(Formatter):
summary_labels = (
('started', 'Started'),
('completed', 'Finished'),
('time_taken', 'Time Taken', lambda x: '%s seconds' % x),
('formatter', 'Formatter'),
('profiles', 'Profiles'),
('strictness', 'Strictness'),
('libraries', 'Libraries Used', lambda x: ', '.join(x)),
('tools', 'Tools Run', lambda x: ', '.join(x)),
('adaptors', 'Adaptors', lambda x: ', '.join(x)),
('message_count', 'Messages Found'),
('external_config', 'External Config'),
)
def render_summary(self):
output = [
'Check Information',
'=================',
]
label_width = max([len(label[1]) for label in self.summary_labels])
for summary_label in self.summary_labels:
key = summary_label[0]
if key in self.summary:
label = summary_label[1]
if len(summary_label) > 2:
value = summary_label[2](self.summary[key])
else:
value = self.summary[key]
output.append(
' %s: %s' % (
label.rjust(label_width),
value,
)
)
return '\n'.join(output)
# pylint: disable=no-self-use
def render_message(self, message):
output = []
if message.location.module:
output.append('%s (%s):' % (
message.location.module,
message.location.path
))
else:
output.append('%s:' % message.location.path)
output.append(
' L%s:%s %s: %s - %s' % (
message.location.line or '-',
message.location.character if message.location.character else '-',
message.location.function,
message.source,
message.code,
)
)
output.append(' %s' % message.message)
return '\n'.join(output)
def render_messages(self):
output = [
'Messages',
'========',
'',
]
for message in self.messages:
output.append(self.render_message(message))
output.append('')
return '\n'.join(output)
def render_profile(self):
output = [
'Profile',
'=======',
'',
self.profile.as_yaml().strip()
]
return '\n'.join(output)
def render(self, summary=True, messages=True, profile=False):
output = []
if messages and self.messages: # if there are no messages, don't render an empty header
output.append(self.render_messages())
if profile:
output.append(self.render_profile())
if summary:
output.append(self.render_summary())
return '\n\n\n'.join(output) + '\n'
| gpl-2.0 | 6,796,904,696,348,735,000 | 27.254545 | 96 | 0.47973 | false | 4.334728 | false | false | false |
google-research/episodic-curiosity | third_party/baselines/common/misc_util.py | 1 | 7608 | # coding=utf-8
import gym
import numpy as np
import os
import pickle
import random
import tempfile
import zipfile
def zipsame(*seqs):
L = len(seqs[0])
assert all(len(seq) == L for seq in seqs[1:])
return zip(*seqs)
def unpack(seq, sizes):
"""
Unpack 'seq' into a sequence of lists, with lengths specified by 'sizes'.
None = just one bare element, not a list
Example:
unpack([1,2,3,4,5,6], [3,None,2]) -> ([1,2,3], 4, [5,6])
"""
seq = list(seq)
it = iter(seq)
assert sum(1 if s is None else s for s in sizes) == len(seq), "Trying to unpack %s into %s" % (seq, sizes)
for size in sizes:
if size is None:
yield it.__next__()
else:
li = []
for _ in range(size):
li.append(it.__next__())
yield li
class EzPickle(object):
"""Objects that are pickled and unpickled via their constructor
arguments.
Example usage:
class Dog(Animal, EzPickle):
def __init__(self, furcolor, tailkind="bushy"):
Animal.__init__()
EzPickle.__init__(furcolor, tailkind)
...
When this object is unpickled, a new Dog will be constructed by passing the provided
furcolor and tailkind into the constructor. However, philosophers are still not sure
whether it is still the same dog.
This is generally needed only for environments which wrap C/C++ code, such as MuJoCo
and Atari.
"""
def __init__(self, *args, **kwargs):
self._ezpickle_args = args
self._ezpickle_kwargs = kwargs
def __getstate__(self):
return {"_ezpickle_args": self._ezpickle_args, "_ezpickle_kwargs": self._ezpickle_kwargs}
def __setstate__(self, d):
out = type(self)(*d["_ezpickle_args"], **d["_ezpickle_kwargs"])
self.__dict__.update(out.__dict__)
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
def pretty_eta(seconds_left):
"""Print the number of seconds in human readable format.
Examples:
2 days
2 hours and 37 minutes
less than a minute
    Parameters
    ----------
seconds_left: int
Number of seconds to be converted to the ETA
Returns
-------
eta: str
String representing the pretty ETA.
"""
minutes_left = seconds_left // 60
seconds_left %= 60
hours_left = minutes_left // 60
minutes_left %= 60
days_left = hours_left // 24
hours_left %= 24
def helper(cnt, name):
return "{} {}{}".format(str(cnt), name, ('s' if cnt > 1 else ''))
if days_left > 0:
msg = helper(days_left, 'day')
if hours_left > 0:
msg += ' and ' + helper(hours_left, 'hour')
return msg
if hours_left > 0:
msg = helper(hours_left, 'hour')
if minutes_left > 0:
msg += ' and ' + helper(minutes_left, 'minute')
return msg
if minutes_left > 0:
return helper(minutes_left, 'minute')
return 'less than a minute'
class RunningAvg(object):
def __init__(self, gamma, init_value=None):
"""Keep a running estimate of a quantity. This is a bit like mean
but more sensitive to recent changes.
Parameters
----------
gamma: float
Must be between 0 and 1, where 0 is the most sensitive to recent
changes.
init_value: float or None
Initial value of the estimate. If None, it will be set on the first update.
"""
self._value = init_value
self._gamma = gamma
def update(self, new_val):
"""Update the estimate.
Parameters
----------
new_val: float
            new observed value of the estimated quantity.
"""
if self._value is None:
self._value = new_val
else:
self._value = self._gamma * self._value + (1.0 - self._gamma) * new_val
def __float__(self):
"""Get the current estimate"""
return self._value
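# Illustrative usage sketch (not part of the original module): RunningAvg keeps an
# exponential moving average; per the docstring, a smaller gamma reacts faster to
# new observations.
#
#   avg = RunningAvg(gamma=0.9, init_value=0.0)
#   for reward in [1.0, 0.0, 1.0, 1.0]:
#       avg.update(reward)
#   print(float(avg))  # smoothed estimate, mostly reflecting earlier values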
def boolean_flag(parser, name, default=False, help=None):
"""Add a boolean flag to argparse parser.
Parameters
----------
parser: argparse.Parser
parser to add the flag to
name: str
--<name> will enable the flag, while --no-<name> will disable it
default: bool or None
default value of the flag
help: str
help string for the flag
"""
dest = name.replace('-', '_')
parser.add_argument("--" + name, action="store_true", default=default, dest=dest, help=help)
parser.add_argument("--no-" + name, action="store_false", dest=dest)
def get_wrapper_by_name(env, classname):
"""Given an a gym environment possibly wrapped multiple times, returns a wrapper
of class named classname or raises ValueError if no such wrapper was applied
Parameters
----------
env: gym.Env of gym.Wrapper
gym environment
classname: str
name of the wrapper
Returns
-------
wrapper: gym.Wrapper
wrapper named classname
"""
currentenv = env
while True:
if classname == currentenv.class_name():
return currentenv
elif isinstance(currentenv, gym.Wrapper):
currentenv = currentenv.env
else:
raise ValueError("Couldn't find wrapper named %s" % classname)
def relatively_safe_pickle_dump(obj, path, compression=False):
"""This is just like regular pickle dump, except from the fact that failure cases are
different:
- It's never possible that we end up with a pickle in corrupted state.
- If a there was a different file at the path, that file will remain unchanged in the
even of failure (provided that filesystem rename is atomic).
- it is sometimes possible that we end up with useless temp file which needs to be
deleted manually (it will be removed automatically on the next function call)
The indended use case is periodic checkpoints of experiment state, such that we never
corrupt previous checkpoints if the current one fails.
Parameters
----------
obj: object
object to pickle
path: str
path to the output file
compression: bool
if true pickle will be compressed
"""
temp_storage = path + ".relatively_safe"
if compression:
# Using gzip here would be simpler, but the size is limited to 2GB
with tempfile.NamedTemporaryFile() as uncompressed_file:
pickle.dump(obj, uncompressed_file)
uncompressed_file.file.flush()
with zipfile.ZipFile(temp_storage, "w", compression=zipfile.ZIP_DEFLATED) as myzip:
myzip.write(uncompressed_file.name, "data")
else:
with open(temp_storage, "wb") as f:
pickle.dump(obj, f)
os.rename(temp_storage, path)
def pickle_load(path, compression=False):
"""Unpickle a possible compressed pickle.
Parameters
----------
path: str
path to the output file
compression: bool
if true assumes that pickle was compressed when created and attempts decompression.
Returns
-------
obj: object
the unpickled object
"""
if compression:
with zipfile.ZipFile(path, "r", compression=zipfile.ZIP_DEFLATED) as myzip:
with myzip.open("data") as f:
return pickle.load(f)
else:
with open(path, "rb") as f:
return pickle.load(f)
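# Illustrative round trip for the two helpers above (the file name is an assumption):
#
#   state = {"step": 42, "scores": [1.0, 2.5]}
#   relatively_safe_pickle_dump(state, "checkpoint.pkl.zip", compression=True)
#   assert pickle_load("checkpoint.pkl.zip", compression=True) == state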
| apache-2.0 | -6,773,599,826,624,659,000 | 28.374517 | 110 | 0.59674 | false | 4.094726 | false | false | false |
molly24Huang/Cents_trip | Recommendation/attr_food_distance.py | 1 | 2978 | import pandas as pd
from math import sin, cos, sqrt, asin, radians
#import ibm_db
def cal_dist(lon1, lat1, lon2, lat2):
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * asin(sqrt(a))
distance = 6378.137 * c
return distance
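# Quick sanity-check sketch for cal_dist (illustrative coordinates, not from the
# dataset): two points a few kilometres apart in Singapore. The argument order is
# lon1, lat1, lon2, lat2 and the result is in kilometres.
#
#   d = cal_dist(103.8198, 1.3521, 103.8519, 1.2966)
#   print('%.2f km' % d)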
food = 'D:\\Dropbox\\Mcomp\\CS5224\\Project\\Cents_trip-master\\dataset\\food.csv'
tourism_attractions = 'D:\\Dropbox\\Mcomp\\CS5224\\Project\\Cents_trip-master\\dataset\\TOURISM_ATTRACTIONS.csv'
food_df = pd.read_csv(food)
tourism_attractions_df = pd.read_csv(tourism_attractions)
food_data = food_df.iloc[:,[0,6,7]]
tourism_attractions_data = tourism_attractions_df.iloc[:,[0,2,3]]
foodid = food_data['FOODID'].as_matrix()
#print(len(roomid))
lat_food = food_data['LATITUDE'].as_matrix()
lng_food = food_data['LONGITUDE'].as_matrix()
attractionid = tourism_attractions_data['ATTRACTIONID'].as_matrix()
#print(attractionid)
lat_attractions = tourism_attractions_data['LATITUDE'].as_matrix()
lng_attractions = tourism_attractions_data['LONGITUDE'].as_matrix()
distances = []
# conn = ibm_db.connect("DATABASE=BLUDB;HOSTNAME=dashdb-entry-yp-dal09-09.services.dal.bluemix.net;\
# PORT=50000;PROTOCOL=TCPIP;UID=dash9787;\
# PWD=X_c03EeYTe#u;", "", "")
for i in range(len(tourism_attractions_data)):
for k in range(len(food_data)):
distance = cal_dist(lng_attractions[i], lat_attractions[i], lng_food[k], lat_food[k])
# print(distance)
distances.append(distance)
output = open('rating.txt','w')
k = 1
for i in range(len(tourism_attractions_data)):
for j in range(len(food_data)):
this_attractid = str(attractionid[i])
this_foodid = str(foodid[j])
        this_distance = str(distances[i * len(food_data) + j])  # flat index: i-th attraction, j-th food place
output.write(this_attractid)
output.write('\t')
output.write(this_foodid)
output.write('\t')
output.write(this_distance)
output.write('\n')
output.close()
#print(len(distances))
# k = 1
# for i in range(len(tourism_attractions_data)):
# for j in range(len(food_data)):
# this_attractid = attractionid[i]
# this_foodid = foodid[j]
# this_distance = distances[(i + 1)* j]
# sql = r'INSERT INTO DISTANCE_FOOD_ATTRACTION(ATTRACTIONID, FOODID, DISTANCE) VALUES({attractionID}, {foodID}, {distance})'.format(
# attractionID=this_attractid, foodID=this_foodid, distance=this_distance
# )
# print(sql, '>>')
# try:
# stmt = ibm_db.exec_immediate(conn, sql)
# except Exception as e:
# print(e)
# print("Inserting couldn't be completed.")
# ibm_db.rollback(conn)
# else:
# ibm_db.commit(conn)
# print("Inserting complete.")
# print('-----' + str(k) + '-----')
# k += 1
# #
| apache-2.0 | 2,555,478,355,868,660,000 | 33.627907 | 140 | 0.611148 | false | 2.838894 | false | false | false |
mjhennig/dyn-python | dyn/tm/services/httpredirect.py | 1 | 5031 | # -*- coding: utf-8 -*-
"""This module contains API Wrapper implementations of the HTTP Redirect service
"""
import logging
from ..session import DynectSession
from ...compat import force_unicode
__author__ = 'xorg'
__all__ = ['HTTPRedirect']
class HTTPRedirect(object):
"""HTTPRedirect is a service which sets up a redirect to the specified URL.//
"""
def __init__(self, zone, fqdn, *args, **kwargs):
"""Create a new :class:`HTTPRedirect` service object
:param zone: The zone to attach this HTTPRedirect Service to
:param fqdn: The FQDN of the node where this service will be attached
:param code: HTTP response code to return for redirection.
:param url: The target URL where the client is sent. Must begin with either http:// or https://
:param keep_uri: A flag indicating whether the redirection should include the originally requested URI.
"""
super(HTTPRedirect, self).__init__()
self._zone = zone
self._fqdn = fqdn
self._code = self._url = self._keep_uri = None
if 'api' in kwargs:
del kwargs['api']
for key, val in kwargs.items():
setattr(self, '_' + key, val)
elif len(args) + len(kwargs) == 1:
self._get()
else:
self._post(*args, **kwargs)
def _get(self):
"""Build an object around an existing DynECT HTTPRedirect Service"""
self.uri = '/HTTPRedirect/{}/{}/'.format(self._zone, self._fqdn)
api_args = {'detail': 'Y'}
response = DynectSession.get_session().execute(self.uri, 'GET',
api_args)
for key, val in response['data'].items():
setattr(self, '_' + key, val)
def _post(self, code, keep_uri, url):
"""Create a new HTTPRedirect Service on the DynECT System"""
self._code = code
self._keep_uri = keep_uri
self._url = url
self.uri = '/HTTPRedirect/{}/{}/'.format(self._zone, self._fqdn)
api_args = {'code': self._code, 'keep_uri': self._keep_uri, 'url': self._url}
response = DynectSession.get_session().execute(self.uri, 'POST',
api_args)
for key, val in response['data'].items():
setattr(self, '_' + key, val)
def _update(self, **kwargs):
"""Update an existing HTTPRedirect Service on the DynECT System"""
        self._code = kwargs.get('code', self._code)
        # use the cached attribute here; going through the keep_uri property would
        # re-fetch the service and overwrite the values set on the lines around it
        self._keep_uri = kwargs.get('keep_uri', self._keep_uri)
        self._url = kwargs.get('url', self._url)
self.uri = '/HTTPRedirect/{}/{}/'.format(self._zone, self._fqdn)
api_args = {'code': self._code, 'keep_uri': self._keep_uri, 'url': self._url}
response = DynectSession.get_session().execute(self.uri, 'PUT',
api_args)
for key, val in response['data'].items():
setattr(self, '_' + key, val)
@property
def zone(self):
"""The zone that this HTTPRedirect Service is attached to is a read-only
attribute
"""
self._get()
return self._zone
@zone.setter
def zone(self, value):
pass
@property
def fqdn(self):
"""The fqdn that this HTTPRedirect Service is attached to is a read-only
attribute
"""
self._get()
return self._fqdn
@fqdn.setter
def fqdn(self, value):
pass
@property
def code(self):
"""HTTP response code to return for redirection.
Valid values:
301 – Permanent redirect
302 – Temporary redirect
"""
self._get()
return self._code
@code.setter
def code(self, value):
self._update(code=value)
@property
def keep_uri(self):
"""A flag indicating whether the redirection should include the originally requested URI.
Valid values: Y, N
"""
self._get()
return self._keep_uri
@keep_uri.setter
def keep_uri(self, value):
self._update(keep_uri=value)
@property
def url(self):
"""The target URL where the client is sent. Must begin with either http:// or https://"""
self._get()
return self._url
@url.setter
def url(self, value):
self._update(url=value)
def delete(self, publish='Y'):
"""Delete this HTTPRedirect service from the DynECT System
publish='N' can be passed into this function to do a soft-delete which will be
acted upon during a zone publish.
"""
        api_args = {'publish': publish}
DynectSession.get_session().execute(self.uri, 'DELETE', api_args)
def __str__(self):
"""str override"""
return force_unicode('<HTTPRedirect>: {}').format(self._fqdn)
__repr__ = __unicode__ = __str__
def __bytes__(self):
"""bytes override"""
return bytes(self.__str__())
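# Illustrative usage sketch (assumes an authenticated DynectSession already exists;
# the zone, FQDN and target URL below are placeholders, not part of this module):
#
#   redirect = HTTPRedirect('example.com', 'www.example.com',
#                           code=301, keep_uri='Y', url='http://example.org/')
#   redirect.code = 302            # pushes an update through the REST API
#   redirect.delete(publish='Y')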
| bsd-3-clause | 8,570,874,800,027,544,000 | 33.909722 | 112 | 0.560971 | false | 4.137449 | false | false | false |
emvarun/followup-and-location | Sky_Patch.py | 1 | 4646 | #!/usr/bin/python
import os, re
import numpy as np
import healpy as hp
import astropy.units as u
from astropy.coordinates import SkyCoord, EarthLocation, AltAz, get_sun
from astropy.time import Time
from astropy.io import fits
import ephem
from ephem import *
from params import Observatory_Locations
def Patch(fitsfile, verbose=False, prob_cover=0.99):
''' Reads the patch and considers only an area upto the prob_cover variable.
'''
(pixProb, header) = hp.read_map(fitsfile, field=0, nest=False, hdu=1, h=True, verbose=False, memmap=False)
nside = hp.npix2nside(len(pixProb))
theta, phi = hp.pix2ang(nside, np.arange(0, len(pixProb)), nest=False)
total_prob = np.sum(pixProb)
pixArea = hp.nside2pixarea(nside, degrees = 'True')
nonzero = pixProb > 0.0
nonzeroProb, nonzeroTheta, nonzeroPhi = pixProb[nonzero], theta[nonzero], phi[nonzero]
order = np.argsort(-nonzeroProb)
sortedProb, sortedTheta, sortedPhi = nonzeroProb[order], nonzeroTheta[order], nonzeroPhi[order]
# Now select top prob_cover %
SigPix = np.cumsum(sortedProb) <= prob_cover
if verbose:
rejPix = np.cumsum(nonzeroProb) >= prob_cover
fin_pixProb = sortedProb[SigPix]
fin_theta, fin_phi = sortedTheta[SigPix], sortedPhi[SigPix]
return fin_pixProb, fin_theta, fin_phi, nside, pixArea
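# Illustrative sketch (not part of the original script): Patch sorts the pixels by
# probability and keeps the smallest set whose cumulative sum reaches prob_cover,
# i.e. the 99% credible region of the sky map by default. The file name is assumed.
#
#   prob, theta, phi, nside, pix_area = Patch('bayestar.fits.gz')
#   print(len(prob) * pix_area, 'square degrees in the 99% region')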
def onSkyPatch(pixprob, fin_theta, fin_phi, total_prob, obsName, tim, twilight=18., verbose=False):
''' Modifying the patch accordingly as it becomes observable. The time step after
	which the changed sky position is considered is defined by the variable
	stepsize in the params file.
'''
RA, Dec = np.rad2deg(fin_phi), np.rad2deg(np.pi/2.0 - fin_theta) # RA & Dec of pixels
skycords = SkyCoord(RA*u.deg, Dec*u.deg)
otime = tim.iso
altaz = skycords.transform_to(AltAz(location=Observatory_Locations[obsName].location, obstime=otime))
alt, az = altaz.alt.degree, altaz.az.degree
aboveSky = alt > Observatory_Locations[obsName].horizon
above_alt, above_az, Prob = alt[aboveSky], az[aboveSky], pixprob[aboveSky]
abovSkyProb = np.sum(Prob)
sun_below = get_sun(tim).transform_to(AltAz(location=Observatory_Locations[obsName].location, obstime=otime)).alt.degree < -np.abs(twilight)
if(abovSkyProb*sun_below != 0):
obs_prob = pixprob[aboveSky]
pixprob[aboveSky] = 0.0
else:
obs_prob = 0.0
return [above_alt, above_az, Prob], [abovSkyProb, abovSkyProb*sun_below, total_prob - abovSkyProb*sun_below, sun_below], pixprob, obs_prob
def totalSkyPatch(fitsfile, pixprob, theta, phi, obsName, nsteps, h, twilight=18., verbose=False):
''' Returns the total probability visible for a patch, given some time t to follow the
patch after the trigger. This variable is defined in the params file as Texp.
'''
(pixelProb, header) = hp.read_map(fitsfile, field=0, nest=False, hdu=1, h=True, verbose=False, memmap=False)
total_prob = np.sum(pixelProb)
f = fits.open(fitsfile)
stim= f[1].header["DATE-OBS"]
detectors = f[1].header["INSTRUME"]
time = stim[:10]+" "+stim[11:]
time = Time( time, format = 'iso', scale = 'utc')
time = time.mjd
probObserve = []
thetaObserve = []
phiObserve = []
nObserve = 0.0
for l in range(0, nsteps):
tim = time + h*l*second
tim = Time(tim, format = 'mjd')
aboveSky, instt_vis, pixprob, obs_prob = onSkyPatch(pixprob, theta, phi, total_prob, obsName, tim)
if(np.sum(obs_prob) > 0.0000001):
obs_prob = [x for x in obs_prob if x != 0]
obs_prob = np.array(obs_prob).tolist()
probObserve = probObserve + obs_prob
nObserve = float(len(obs_prob)) + nObserve
return probObserve, nObserve, [stim, detectors]
def Coverage(fitsfile, obsName, Texp, NsqDeg, h):
''' Returns the probability covered for a given patch and a specific location -- given
that location has a covering capability of N square degree.
	A small note: the value (higher) or the number of values given in NsqDeg does not affect
	the computation time.
'''
# Texp is how many hours after the trigger one could possibly followup
Texp2secs = Texp*3600
nsteps = Texp2secs/h
fin_pixProb, fin_theta, fin_phi, nside, pixArea = Patch(fitsfile)
probObserve, nObserve, timdet = totalSkyPatch(fitsfile, fin_pixProb, fin_theta, fin_phi, obsName, nsteps, h)
probObserve = sorted(probObserve, reverse=True)
cumProb = np.cumsum(probObserve)
nceil = [0.]*len(NsqDeg)
n = [0.]*len(NsqDeg)
n.append(timdet)
for i in range (0, len(NsqDeg)):
nceil[i] = np.ceil(NsqDeg[i]/pixArea)
for i in range(0, len(NsqDeg)):
area = nceil[i]*pixArea
if(nObserve != 0):
if(nceil[i] < nObserve):
n[i] = [ area, cumProb[nceil[i]] ]
else:
n[i] = [ area, cumProb[nObserve-1] ]
else:
n[i] = [area, 0.]
return n
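# Illustrative call (all values are assumptions): probability covered from one site
# defined in params.Observatory_Locations, following up for 8 hours in 600 s steps,
# for instruments that can tile 10 or 100 square degrees.
#
#   print(Coverage('bayestar.fits.gz', 'Hanle', Texp=8, NsqDeg=[10., 100.], h=600))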
| gpl-3.0 | 6,250,973,748,818,928,000 | 38.372881 | 141 | 0.71201 | false | 2.702734 | false | false | false |
mozilla/socorro | webapp-django/crashstats/crashstats/tests/test_bugassociations.py | 1 | 4928 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import io
import requests_mock
import pytest
from django.core.management import call_command
from crashstats.crashstats.models import BugAssociation
from crashstats.crashstats.management.commands.bugassociations import (
BUGZILLA_BASE_URL,
find_signatures,
)
SAMPLE_BUGZILLA_RESULTS = {
"bugs": [
{"id": "1", "cf_crash_signature": "This sig, while bogus, has a ] bracket"},
{
"id": "2",
"cf_crash_signature": "single [@ BogusClass::bogus_sig (const char**) ] signature",
},
{
"id": "3",
"cf_crash_signature": "[@ js3250.dll@0x6cb96] [@ valid.sig@0x333333]",
},
{
"id": "4",
"cf_crash_signature": "[@ layers::Push@0x123456] [@ layers::Push@0x123456]",
},
{
"id": "5",
"cf_crash_signature": (
"[@ MWSBAR.DLL@0x2589f] and a broken one [@ sadTrombone.DLL@0xb4s455"
),
},
{"id": "6", "cf_crash_signature": ""},
{
"id": "7",
"cf_crash_signature": "[@gfx::font(nsTArray<nsRefPtr<FontEntry> > const&)]",
},
{
"id": "8",
"cf_crash_signature": "[@ legitimate(sig)] \n junk \n [@ another::legitimate(sig) ]",
},
{"id": "42"},
]
}
class TestBugAssociationsCommand:
def fetch_data(self):
return [
{"bug_id": ba.bug_id, "signature": ba.signature}
for ba in BugAssociation.objects.order_by("bug_id", "signature")
]
def insert_data(self, bug_id, signature):
BugAssociation.objects.create(bug_id=bug_id, signature=signature)
def test_basic_run_job(self, db):
with requests_mock.Mocker() as req_mock:
req_mock.get(BUGZILLA_BASE_URL, json=SAMPLE_BUGZILLA_RESULTS)
out = io.StringIO()
call_command("bugassociations", stdout=out)
associations = self.fetch_data()
# Verify we have the expected number of associations
assert len(associations) == 8
bug_ids = set([x["bug_id"] for x in associations])
# Verify bugs with no crash signatures are missing
assert 6 not in bug_ids
bug_8_signatures = [
item["signature"] for item in associations if item["bug_id"] == 8
]
# New signatures have correctly been inserted
assert len(bug_8_signatures) == 2
assert "another::legitimate(sig)" in bug_8_signatures
assert "legitimate(sig)" in bug_8_signatures
def test_run_job_with_reports_with_existing_bugs_different(self, db):
"""Verify that an association to a signature that no longer is part
of the crash signatures list gets removed.
"""
self.insert_data(bug_id="8", signature="@different")
with requests_mock.Mocker() as req_mock:
req_mock.get(BUGZILLA_BASE_URL, json=SAMPLE_BUGZILLA_RESULTS)
out = io.StringIO()
call_command("bugassociations", stdout=out)
# The previous association, to signature '@different' that is not in
# crash signatures, is now missing
associations = self.fetch_data()
assert "@different" not in [item["signature"] for item in associations]
def test_run_job_with_reports_with_existing_bugs_same(self, db):
self.insert_data(bug_id="8", signature="legitimate(sig)")
with requests_mock.Mocker() as req_mock:
req_mock.get(BUGZILLA_BASE_URL, json=SAMPLE_BUGZILLA_RESULTS)
out = io.StringIO()
call_command("bugassociations", stdout=out)
associations = self.fetch_data()
associations = [
item["signature"] for item in associations if item["bug_id"] == 8
]
# New signatures have correctly been inserted
assert len(associations) == 2
assert associations == ["another::legitimate(sig)", "legitimate(sig)"]
@pytest.mark.parametrize(
"content, expected",
[
# Simple signature
("[@ moz::signature]", set(["moz::signature"])),
# Using unicode.
("[@ moz::signature]", set(["moz::signature"])),
# 2 signatures and some junk
(
"@@3*&^!~[@ moz::signature][@ ns::old ]",
set(["moz::signature", "ns::old"]),
),
# A signature containing square brackets.
(
"[@ moz::signature] [@ sig_with[brackets]]",
set(["moz::signature", "sig_with[brackets]"]),
),
# A malformed signature.
("[@ note there is no trailing bracket", set()),
],
)
def test_find_signatures(content, expected):
assert find_signatures(content) == expected
| mpl-2.0 | 2,033,320,415,098,187,800 | 32.986207 | 97 | 0.576096 | false | 3.71644 | true | false | false |
all-out/lightswitch | lightswitch/main/migrations/0002_auto__del_members__del_ships__add_ship__add_member.py | 1 | 3070 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Members'
db.delete_table(u'main_members')
# Deleting model 'Ships'
db.delete_table(u'main_ships')
# Adding model 'Ship'
db.create_table(u'main_ship', (
('id', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'main', ['Ship'])
# Adding model 'Member'
db.create_table(u'main_member', (
('id', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('logoff_ts', self.gf('django.db.models.fields.DateTimeField')()),
('join_ts', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'main', ['Member'])
def backwards(self, orm):
# Adding model 'Members'
db.create_table(u'main_members', (
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('id', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
('logoff_ts', self.gf('django.db.models.fields.DateTimeField')()),
('join_ts', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'main', ['Members'])
# Adding model 'Ships'
db.create_table(u'main_ships', (
('id', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'main', ['Ships'])
# Deleting model 'Ship'
db.delete_table(u'main_ship')
# Deleting model 'Member'
db.delete_table(u'main_member')
models = {
u'main.location': {
'Meta': {'object_name': 'Location'},
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'main.member': {
'Meta': {'object_name': 'Member'},
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'join_ts': ('django.db.models.fields.DateTimeField', [], {}),
'logoff_ts': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'main.ship': {
'Meta': {'object_name': 'Ship'},
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['main'] | mit | -8,889,125,126,581,968,000 | 38.371795 | 88 | 0.558306 | false | 3.628842 | false | false | false |
dparks1134/STAMP | stamp/plugins/samples/plots/configGUI/multCompCorrectionUI.py | 1 | 12647 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'multCompCorrection.ui'
#
# Created: Sat Apr 16 13:41:52 2011
# by: PyQt4 UI code generator 4.6.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_MultCompCorrectionDialog(object):
def setupUi(self, MultCompCorrectionDialog):
MultCompCorrectionDialog.setObjectName("MultCompCorrectionDialog")
MultCompCorrectionDialog.resize(716, 162)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MultCompCorrectionDialog.sizePolicy().hasHeightForWidth())
MultCompCorrectionDialog.setSizePolicy(sizePolicy)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icons/icons/programIcon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MultCompCorrectionDialog.setWindowIcon(icon)
self.verticalLayout_3 = QtGui.QVBoxLayout(MultCompCorrectionDialog)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.groupBox_3 = QtGui.QGroupBox(MultCompCorrectionDialog)
self.groupBox_3.setObjectName("groupBox_3")
self.horizontalLayout_6 = QtGui.QHBoxLayout(self.groupBox_3)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.formLayout_2 = QtGui.QFormLayout()
self.formLayout_2.setObjectName("formLayout_2")
self.lblFigureWidth = QtGui.QLabel(self.groupBox_3)
self.lblFigureWidth.setObjectName("lblFigureWidth")
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.lblFigureWidth)
self.spinFigWidth = QtGui.QDoubleSpinBox(self.groupBox_3)
self.spinFigWidth.setDecimals(2)
self.spinFigWidth.setMinimum(2.0)
self.spinFigWidth.setMaximum(20.0)
self.spinFigWidth.setSingleStep(0.5)
self.spinFigWidth.setProperty("value", 6.5)
self.spinFigWidth.setObjectName("spinFigWidth")
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.spinFigWidth)
self.lblFigureHeight = QtGui.QLabel(self.groupBox_3)
self.lblFigureHeight.setObjectName("lblFigureHeight")
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.lblFigureHeight)
self.spinFigHeight = QtGui.QDoubleSpinBox(self.groupBox_3)
self.spinFigHeight.setMinimum(2.0)
self.spinFigHeight.setMaximum(12.0)
self.spinFigHeight.setSingleStep(0.5)
self.spinFigHeight.setProperty("value", 6.5)
self.spinFigHeight.setObjectName("spinFigHeight")
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.spinFigHeight)
self.horizontalLayout_6.addLayout(self.formLayout_2)
self.horizontalLayout_5.addWidget(self.groupBox_3)
self.groupBox_8 = QtGui.QGroupBox(MultCompCorrectionDialog)
self.groupBox_8.setObjectName("groupBox_8")
self.verticalLayout_4 = QtGui.QVBoxLayout(self.groupBox_8)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.formLayout = QtGui.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.lblBinWidth = QtGui.QLabel(self.groupBox_8)
self.lblBinWidth.setObjectName("lblBinWidth")
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.lblBinWidth)
self.horizontalLayout_9 = QtGui.QHBoxLayout()
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.spinBinWidth = QtGui.QDoubleSpinBox(self.groupBox_8)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.spinBinWidth.sizePolicy().hasHeightForWidth())
self.spinBinWidth.setSizePolicy(sizePolicy)
self.spinBinWidth.setDecimals(4)
self.spinBinWidth.setMinimum(0.0001)
self.spinBinWidth.setMaximum(10000.0)
self.spinBinWidth.setSingleStep(0.0001)
self.spinBinWidth.setProperty("value", 0.002)
self.spinBinWidth.setObjectName("spinBinWidth")
self.horizontalLayout_9.addWidget(self.spinBinWidth)
spacerItem = QtGui.QSpacerItem(1, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_9.addItem(spacerItem)
self.formLayout.setLayout(0, QtGui.QFormLayout.FieldRole, self.horizontalLayout_9)
self.label = QtGui.QLabel(self.groupBox_8)
self.label.setObjectName("label")
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label)
self.horizontalLayout_7 = QtGui.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.spinXlimitFig1 = QtGui.QDoubleSpinBox(self.groupBox_8)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.spinXlimitFig1.sizePolicy().hasHeightForWidth())
self.spinXlimitFig1.setSizePolicy(sizePolicy)
self.spinXlimitFig1.setDecimals(4)
self.spinXlimitFig1.setMinimum(0.0001)
self.spinXlimitFig1.setMaximum(10000.0)
self.spinXlimitFig1.setSingleStep(0.01)
self.spinXlimitFig1.setProperty("value", 0.05)
self.spinXlimitFig1.setObjectName("spinXlimitFig1")
self.horizontalLayout_7.addWidget(self.spinXlimitFig1)
self.btnXmaxFig1 = QtGui.QPushButton(self.groupBox_8)
self.btnXmaxFig1.setObjectName("btnXmaxFig1")
self.horizontalLayout_7.addWidget(self.btnXmaxFig1)
self.formLayout.setLayout(1, QtGui.QFormLayout.FieldRole, self.horizontalLayout_7)
self.verticalLayout_4.addLayout(self.formLayout)
self.chkLogScale = QtGui.QCheckBox(self.groupBox_8)
self.chkLogScale.setObjectName("chkLogScale")
self.verticalLayout_4.addWidget(self.chkLogScale)
self.horizontalLayout_5.addWidget(self.groupBox_8)
self.groupBox_2 = QtGui.QGroupBox(MultCompCorrectionDialog)
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_2)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_3 = QtGui.QLabel(self.groupBox_2)
self.label_3.setObjectName("label_3")
self.horizontalLayout_4.addWidget(self.label_3)
self.spinXlimitFig2 = QtGui.QDoubleSpinBox(self.groupBox_2)
self.spinXlimitFig2.setDecimals(4)
self.spinXlimitFig2.setMinimum(0.0001)
self.spinXlimitFig2.setMaximum(10000.0)
self.spinXlimitFig2.setSingleStep(0.01)
self.spinXlimitFig2.setProperty("value", 0.05)
self.spinXlimitFig2.setObjectName("spinXlimitFig2")
self.horizontalLayout_4.addWidget(self.spinXlimitFig2)
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.btnXmaxFig2 = QtGui.QPushButton(self.groupBox_2)
self.btnXmaxFig2.setObjectName("btnXmaxFig2")
self.horizontalLayout_2.addWidget(self.btnXmaxFig2)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.horizontalLayout_5.addWidget(self.groupBox_2)
self.groupBox = QtGui.QGroupBox(MultCompCorrectionDialog)
self.groupBox.setObjectName("groupBox")
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setObjectName("label_2")
self.horizontalLayout.addWidget(self.label_2)
self.spinXlimitFig3 = QtGui.QDoubleSpinBox(self.groupBox)
self.spinXlimitFig3.setDecimals(4)
self.spinXlimitFig3.setMinimum(0.0001)
self.spinXlimitFig3.setMaximum(10000.0)
self.spinXlimitFig3.setSingleStep(0.01)
self.spinXlimitFig3.setProperty("value", 0.05)
self.spinXlimitFig3.setObjectName("spinXlimitFig3")
self.horizontalLayout.addWidget(self.spinXlimitFig3)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_8 = QtGui.QHBoxLayout()
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_8.addItem(spacerItem2)
self.btnXmaxFig3 = QtGui.QPushButton(self.groupBox)
self.btnXmaxFig3.setObjectName("btnXmaxFig3")
self.horizontalLayout_8.addWidget(self.btnXmaxFig3)
self.verticalLayout.addLayout(self.horizontalLayout_8)
self.horizontalLayout_5.addWidget(self.groupBox)
self.verticalLayout_3.addLayout(self.horizontalLayout_5)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.buttonBox = QtGui.QDialogButtonBox(MultCompCorrectionDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setCenterButtons(False)
self.buttonBox.setObjectName("buttonBox")
self.horizontalLayout_3.addWidget(self.buttonBox)
self.verticalLayout_3.addLayout(self.horizontalLayout_3)
self.retranslateUi(MultCompCorrectionDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), MultCompCorrectionDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), MultCompCorrectionDialog.reject)
QtCore.QMetaObject.connectSlotsByName(MultCompCorrectionDialog)
def retranslateUi(self, MultCompCorrectionDialog):
MultCompCorrectionDialog.setWindowTitle(QtGui.QApplication.translate("MultCompCorrectionDialog", "Multiple comparison plots", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_3.setTitle(QtGui.QApplication.translate("MultCompCorrectionDialog", "Main figure size", None, QtGui.QApplication.UnicodeUTF8))
self.lblFigureWidth.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "Width:", None, QtGui.QApplication.UnicodeUTF8))
self.lblFigureHeight.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "Height:", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_8.setTitle(QtGui.QApplication.translate("MultCompCorrectionDialog", "Histogram plot", None, QtGui.QApplication.UnicodeUTF8))
self.lblBinWidth.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "Bin width:", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "x-axis limit:", None, QtGui.QApplication.UnicodeUTF8))
self.btnXmaxFig1.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "Max", None, QtGui.QApplication.UnicodeUTF8))
self.chkLogScale.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "Show y-axis as log scale", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_2.setTitle(QtGui.QApplication.translate("MultCompCorrectionDialog", "Correction plot", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "x-axis limit:", None, QtGui.QApplication.UnicodeUTF8))
self.btnXmaxFig2.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "Max", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("MultCompCorrectionDialog", "Significant features plot", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "x-axis limit:", None, QtGui.QApplication.UnicodeUTF8))
self.btnXmaxFig3.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "Max", None, QtGui.QApplication.UnicodeUTF8))
| gpl-3.0 | -3,321,368,441,627,692,500 | 63.19797 | 172 | 0.740887 | false | 3.832424 | false | false | false |
rh-s/heat | heat/engine/resources/openstack/manila/security_service.py | 1 | 3666 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class SecurityService(resource.Resource):
"""
A security_service is a set of options that defines a security domain
for a particular shared filesystem protocol, such as an
Active Directory domain or a Kerberos domain.
"""
support_status = support.SupportStatus(version='5.0.0')
PROPERTIES = (
NAME, TYPE, DNS_IP, SERVER, DOMAIN, USER,
PASSWORD, DESCRIPTION
) = (
'name', 'type', 'dns_ip', 'server', 'domain', 'user',
'password', 'description'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Security service name.'),
update_allowed=True
),
TYPE: properties.Schema(
properties.Schema.STRING,
_('Security service type.'),
required=True,
constraints=[
constraints.AllowedValues(['ldap', 'kerberos',
'active_directory'])
]
),
DNS_IP: properties.Schema(
properties.Schema.STRING,
_('DNS IP address used inside tenant\'s network.'),
update_allowed=True
),
SERVER: properties.Schema(
properties.Schema.STRING,
_('Security service IP address or hostname.'),
update_allowed=True
),
DOMAIN: properties.Schema(
properties.Schema.STRING,
_('Security service domain.'),
update_allowed=True
),
USER: properties.Schema(
properties.Schema.STRING,
_('Security service user or group used by tenant.'),
update_allowed=True
),
PASSWORD: properties.Schema(
properties.Schema.STRING,
_('Password used by user.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Security service description.'),
update_allowed=True
)
}
default_client_name = 'manila'
def handle_create(self):
args = dict((k, v) for k, v in self.properties.items()
if v is not None)
security_service = self.client().security_services.create(**args)
self.resource_id_set(security_service.id)
def handle_update(self, json_snippet=None, tmpl_diff=None, prop_diff=None):
if prop_diff:
self.client().security_services.update(self.resource_id,
**prop_diff)
def handle_delete(self):
if self.resource_id is None:
return
try:
self.client().security_services.delete(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
def resource_mapping():
return {
'OS::Manila::SecurityService': SecurityService
}
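# Illustrative HOT template snippet using this resource (values are placeholders,
# not part of the original module):
#
#   resources:
#     demo_security_service:
#       type: OS::Manila::SecurityService
#       properties:
#         name: demo-ldap
#         type: ldap
#         server: 10.0.0.10
#         user: manila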
| apache-2.0 | -3,208,775,644,978,497,000 | 32.027027 | 79 | 0.589198 | false | 4.5825 | false | false | false |
benletchford/stratego.io | gae/tests/FIXTURES.py | 1 | 2396 | import json
import copy
SETUP = [
[
{'rank': '1', 'side': 3},
{'rank': '2', 'side': 3},
{'rank': '3', 'side': 3},
{'rank': '3', 'side': 3},
{'rank': '4', 'side': 3},
{'rank': '4', 'side': 3},
{'rank': '4', 'side': 3},
{'rank': '5', 'side': 3},
{'rank': '5', 'side': 3},
{'rank': '5', 'side': 3}
],
[
{'rank': '5', 'side': 3},
{'rank': '6', 'side': 3},
{'rank': '6', 'side': 3},
{'rank': '6', 'side': 3},
{'rank': '6', 'side': 3},
{'rank': '7', 'side': 3},
{'rank': '7', 'side': 3},
{'rank': '7', 'side': 3},
{'rank': '7', 'side': 3},
{'rank': '8', 'side': 3}
],
[
{'rank': '8', 'side': 3},
{'rank': '8', 'side': 3},
{'rank': '8', 'side': 3},
{'rank': '8', 'side': 3},
{'rank': '9', 'side': 3},
{'rank': '9', 'side': 3},
{'rank': '9', 'side': 3},
{'rank': '9', 'side': 3},
{'rank': '9', 'side': 3},
{'rank': '9', 'side': 3}
],
[
{'rank': '9', 'side': 3},
{'rank': '9', 'side': 3},
{'rank': 'S', 'side': 3},
{'rank': 'B', 'side': 3},
{'rank': 'B', 'side': 3},
{'rank': 'B', 'side': 3},
{'rank': 'B', 'side': 3},
{'rank': 'B', 'side': 3},
{'rank': 'B', 'side': 3},
{'rank': 'F', 'side': 3}
]
]
SETUP_0 = copy.deepcopy(SETUP)
for row in SETUP_0:
for cell in row:
cell['side'] = 0
SETUP_1 = copy.deepcopy(SETUP)
SETUP_1 = SETUP_1[::-1]
for i in xrange(0, len(SETUP_1)):
SETUP_1[i] = SETUP_1[i][::-1]
for row in SETUP_1:
for cell in row:
cell['side'] = 1
DEFAULT_GAME = SETUP_1 + [
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0]
] + SETUP_0
MARSHAL = {
'rank': '1',
'side': 0
}
GENERAL = {
'rank': '2',
'side': 0
}
COLONEL = {
'rank': '3',
'side': 0
}
MAJOR = {
'rank': '4',
'side': 0
}
CAPTAIN = {
'rank': '5',
'side': 0
}
LIEUTENANT = {
'rank': '6',
'side': 0
}
SERGEANT = {
'rank': '7',
'side': 0
}
MINER = {
'rank': '8',
'side': 0
}
SCOUT = {
'rank': '9',
'side': 0
}
SPY = {
'rank': 'S',
'side': 0
}
FLAG = {
'rank': 'F',
'side': 0
}
BOMB = {
'rank': 'B',
'side': 0
}
| mit | 1,294,217,295,813,873,200 | 17.151515 | 39 | 0.344741 | false | 2.576344 | false | false | false |
42cs/book | modules/luther/sphinx/assess/assessbase.py | 1 | 2756 | # Copyright (C) 2011 Bradley N. Miller
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__author__ = 'bmiller'
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst import Directive
_base_js_escapes = (
('\\', r'\u005C'),
('\'', r'\u0027'),
('"', r'\u0022'),
("'", r'\u0027'),
('>', r'\u003E'),
('<', r'\u003C'),
('&', r'\u0026'),
('=', r'\u003D'),
('-', r'\u002D'),
(';', r'\u003B'),
(u'\u2028', r'\u2028'),
(u'\u2029', r'\u2029')
)
# Escape every ASCII character with a value less than 32.
_js_escapes = (_base_js_escapes +
tuple([('%c' % z, '\\u%04X' % z) for z in range(32)]))
# escapejs from Django: https://www.djangoproject.com/
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
if not isinstance(value, basestring):
value = str(value)
for bad, good in _js_escapes:
value = value.replace(bad, good)
return value
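# Illustrative behaviour sketch (not part of the original module): HTML/JS-unsafe
# characters are replaced by \u00XX escapes, e.g. "'" becomes \u0027 and '<' becomes \u003C.
#
#   escapejs("Don't <b>")   # -> "Don\u0027t \u003Cb\u003E"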
class Assessment(Directive):
"""Base Class for assessments"""
def getNumber(self):
env = self.state.document.settings.env
if not hasattr(env,'assesscounter'):
env.assesscounter = 0
env.assesscounter += 1
res = "Q-%d"
if hasattr(env,'assessprefix'):
res = env.assessprefix + "%d"
res = res % env.assesscounter
if hasattr(env, 'assesssuffix'):
res += env.assesssuffix
return res
def run(self):
self.options['qnumber'] = self.getNumber()
self.options['divid'] = self.arguments[0]
        if self.content:  # guard against an empty directive body before touching content[0]
            if self.content[0][:2] == '..': # first line is a directive
                self.content[0] = self.options['qnumber'] + ': \n\n' + self.content[0]
            else:
                self.content[0] = self.options['qnumber'] + ': ' + self.content[0]
            if 'iscode' in self.options:
                self.options['bodytext'] = '<pre>' + "\n".join(self.content) + '</pre>'
            else:
                self.options['bodytext'] = "\n".join(self.content)
        else:
            self.options['bodytext'] = '\n'
| mit | -2,434,039,718,979,837,000 | 28.010526 | 87 | 0.590711 | false | 3.56533 | false | false | false |
dfalk/mezzanine-wiki | mezzanine_wiki/fields.py | 1 | 1385 | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mezzanine.utils.importing import import_dotted_path
class WikiTextField(models.TextField):
"""
TextField that stores markup text.
"""
def formfield(self, **kwargs):
"""
Apply the widget class defined by the
``WIKI_TEXT_WIDGET_CLASS`` setting.
"""
from mezzanine.conf import settings
try:
widget_class = import_dotted_path(settings.WIKI_TEXT_WIDGET_CLASS)
except ImportError:
raise ImproperlyConfigured(_("Could not import the value of "
"settings.WIKI_TEXT_WIDGET_CLASS: %s"
% settings.WIKI_TEXT_WIDGET_CLASS))
kwargs["widget"] = widget_class()
formfield = super(WikiTextField, self).formfield(**kwargs)
return formfield
# South requires custom fields to be given "rules".
# See http://south.aeracode.org/docs/customfields.html
if "south" in settings.INSTALLED_APPS:
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules(rules=[((WikiTextField,), [], {})],
patterns=["mezzanine_wiki\.fields\."])
except ImportError:
pass
| bsd-2-clause | 4,441,378,388,650,258,400 | 34.512821 | 78 | 0.638267 | false | 4.382911 | false | false | false |
gdw2/zim | zim/formats/plain.py | 1 | 4786 | # -*- coding: utf-8 -*-
# Copyright 2008 Jaap Karssenberg <[email protected]>
'''This module handles parsing and dumping input in plain text'''
import re
import zim.parser
from zim.parser import prepare_text, Rule
from zim.formats import *
from zim.parsing import url_re
info = {
'name': 'plain',
'desc': 'Plain text',
'mimetype': 'text/plain',
'extension': 'txt',
'native': False,
'import': True,
'export': True,
}
class Parser(ParserClass):
# TODO parse constructs like *bold* and /italic/ same as in email,
# but do not remove the "*" and "/", just display text 1:1
# TODO also try at least to parse bullet and checkbox lists
# common base class with wiki format
# TODO parse markdown style headers
def parse(self, input, partial=False):
if not isinstance(input, basestring):
input = ''.join(input)
if not partial:
input = prepare_text(input)
parser = zim.parser.Parser(
			Rule(LINK, url_re.r, process=self.parse_url) # FIXME need .r attribute because url_re is a Re object
)
builder = ParseTreeBuilder(partial=partial)
builder.start(FORMATTEDTEXT)
parser(builder, input)
builder.end(FORMATTEDTEXT)
return builder.get_parsetree()
@staticmethod
def parse_url(builder, text):
builder.append(LINK, {'href': text}, text)
class Dumper(DumperClass):
# We dump more constructs than we can parse. Reason for this
# is to ensure dumping a page to plain text will still be
# readable.
BULLETS = {
UNCHECKED_BOX: u'[ ]',
XCHECKED_BOX: u'[x]',
CHECKED_BOX: u'[*]',
BULLET: u'*',
}
# No additional formatting for these tags, otherwise copy-pasting
# as plain text is no longer plain text
TAGS = {
EMPHASIS: ('', ''),
STRONG: ('', ''),
MARK: ('', ''),
STRIKE: ('', ''),
VERBATIM: ('', ''),
TAG: ('', ''),
SUBSCRIPT: ('', ''),
SUPERSCRIPT: ('', ''),
}
def dump_indent(self, tag, attrib, strings):
# Prefix lines with one or more tabs
if attrib and 'indent' in attrib:
prefix = '\t' * int(attrib['indent'])
return self.prefix_lines(prefix, strings)
# TODO enforces we always end such a block with \n unless partial
else:
return strings
dump_p = dump_indent
dump_div = dump_indent
dump_pre = dump_indent
def dump_h(self, tag, attrib, strings):
# Markdown style headers
level = int(attrib['level'])
if level < 1: level = 1
elif level > 5: level = 5
if level in (1, 2):
# setext-style headers for lvl 1 & 2
if level == 1: char = '='
else: char = '-'
heading = u''.join(strings)
underline = char * len(heading)
return [heading + '\n', underline]
else:
# atx-style headers for deeper levels
tag = '#' * level
strings.insert(0, tag + ' ')
return strings
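	# Illustrative output sketch (not part of the original module): a level-2 heading
	# "Head" becomes setext style ['Head\n', '----'], while level 3 and deeper become
	# atx style, e.g. ['### ', 'Head'].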
def dump_list(self, tag, attrib, strings):
if 'indent' in attrib:
# top level list with specified indent
prefix = '\t' * int(attrib['indent'])
			return self.prefix_lines(prefix, strings)
elif self.context[-1].tag in (BULLETLIST, NUMBEREDLIST):
# indent sub list
prefix = '\t'
return self.prefix_lines('\t', strings)
else:
# top level list, no indent
return strings
dump_ul = dump_list
dump_ol = dump_list
def dump_li(self, tag, attrib, strings):
# Here is some logic to figure out the correct bullet character
# depends on parent list element
# TODO accept multi-line content here - e.g. nested paras
if self.context[-1].tag == BULLETLIST:
if 'bullet' in attrib \
and attrib['bullet'] in self.BULLETS:
bullet = self.BULLETS[attrib['bullet']]
else:
bullet = self.BULLETS[BULLET]
elif self.context[-1].tag == NUMBEREDLIST:
iter = self.context[-1].attrib.get('_iter')
if not iter:
# First item on this level
iter = self.context[-1].attrib.get('start', 1)
bullet = iter + '.'
self.context[-1].attrib['_iter'] = increase_list_iter(iter) or '1'
else:
# HACK for raw tree from pageview
# support indenting
# support any bullet type (inc numbered)
bullet = attrib.get('bullet', BULLET)
if bullet in self.BULLETS:
bullet = self.BULLETS[attrib['bullet']]
# else assume it is numbered..
if 'indent' in attrib:
prefix = int(attrib['indent']) * '\t'
bullet = prefix + bullet
return (bullet, ' ') + tuple(strings) + ('\n',)
def dump_link(self, tag, attrib, strings=None):
# Just plain text, either text of link, or link href
assert 'href' in attrib, \
'BUG: link misses href: %s "%s"' % (attrib, strings)
href = attrib['href']
if strings:
return strings
else:
return href
def dump_img(self, tag, attrib, strings=None):
# Just plain text, either alt text or src
src = attrib['src']
alt = attrib.get('alt')
if alt:
return alt
else:
return src
def dump_object_fallback(self, tag, attrib, strings):
return strings
| gpl-2.0 | -5,482,015,731,721,429,000 | 24.322751 | 102 | 0.649812 | false | 3.058147 | false | false | false |
mswart/pyopenmensa | setup.py | 1 | 2029 | from setuptools import setup # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
import version
setup(
name='pyopenmensa',
version=version.STRING,
    description='Useful Python wrapper for creating OpenMensa feeds',
long_description=long_description,
# The project's main homepage.
url='https://github.com/mswart/pyopenmensa',
# Author details
author='Malte Swart',
author_email='[email protected]',
# Choose your license
license='LGPL',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='openmensa feed framework',
package_dir={'pyopenmensa': ''},
packages=['pyopenmensa'],
)
| lgpl-3.0 | 7,562,236,628,138,542,000 | 31.725806 | 83 | 0.644653 | false | 4.166324 | false | false | false |
richard-shepherd/monopyly | AIs/Stephen Chan/big_brick.py | 1 | 20396 | from monopyly import *
import random
class BigBrickAI(PlayerAIBase):
mClassDebug = True
def __init__(self):
self.p("__init__")
self.turn_count = 0
self.chance_cards_count=0
self.community_chest_cards_count=0
self.property_mortgage_cards_count=0
self.auction_spread=4
self.num_players=4
self.num_jail_freecards=0
self.property_set_count={}
self.property_offered_for_auction_adjustment=12
self.properties_we_like = [Square.Name.BOW_STREET,
Square.Name.MARLBOROUGH_STREET,
Square.Name.VINE_STREET,
Square.Name.STRAND,
Square.Name.FLEET_STREET,
Square.Name.TRAFALGAR_SQUARE,
Square.Name.LEICESTER_SQUARE,
Square.Name.COVENTRY_STREET,
Square.Name.PICCADILLY,
Square.Name.REGENT_STREET,
Square.Name.OXFORD_STREET,
Square.Name.BOND_STREET,
Square.Name.PARK_LANE,
Square.Name.MAYFAIR,
Square.Name.PALL_MALL,
Square.Name.WHITEHALL,
Square.Name.NORTHUMBERLAND_AVENUE,
Square.Name.THE_ANGEL_ISLINGTON,
Square.Name.EUSTON_ROAD,
Square.Name.PENTONVILLE_ROAD,
Square.Name.OLD_KENT_ROAD,
Square.Name.WHITECHAPEL_ROAD]
self.properties_we_like_current = Square.Name.BOW_STREET
def p(self, txt):
#print(txt)
pass
def get_name(self):
return "The Big Brick"
def start_of_game(self):
# self.p("Start_of_game")
self.turn_count = 0
return
def start_of_turn(self, game_state, player):
#self.p("Start of turn")
#self.p(self.turn_count)
self.turn_count = self.turn_count + 1
self.num_players = game_state.number_of_players
return
def player_landed_on_square(self, game_state, square, player):
'''
Called when a player lands on a square. All AIs receive this notification.
No response is required.
'''
pass
def landed_on_unowned_property(self, game_state, player, property):
'''
Called when the AI lands on an unowned property. Only the active
player receives this notification.
Must return either the BUY or DO_NOT_BUY action from the
PlayerAIBase.Action enum.
The default behaviour is DO_NOT_BUY.
'''
if player.state.cash > 100:
return PlayerAIBase.Action.BUY
else:
return PlayerAIBase.Action.DO_NOT_BUY
def money_will_be_taken(self, player, amount):
'''
Called shortly before money will be taken from the player.
Before the money is taken, there will be an opportunity to
make deals and/or mortgage properties. (This will be done via
subsequent callbacks.)
No response is required.
'''
pass
def money_taken(self, player, amount):
'''
Called when money has been taken from the player.
No response is required.
'''
pass
def money_given(self, player, amount):
'''
Called when money has been given to the player.
No response is required.
'''
pass
def got_get_out_of_jail_free_card(self):
'''
Called when the player has picked up a
Get Out Of Jail Free card.
No response is required.
'''
self.num_jail_freecards = self.num_jail_freecards + 1
pass
def players_birthday(self):
'''
Called when a player picks up the 'It is your birthday...'
Community Chest card.
You should return "Happy Birthday!" (with this casing and the
exclamation mark). If not, you will have to pay £100 instead of
the standard £10.
'''
return "Happy Birthday!"
def pay_ten_pounds_or_take_a_chance(self, game_state, player):
'''
Called when the player picks up the "Pay a £10 fine or take a Chance" card.
Return either:
PlayerAIBase.Action.PAY_TEN_POUND_FINE
or
PlayerAIBase.Action.TAKE_A_CHANCE
'''
if player.state.cash > 100:
return PlayerAIBase.Action.TAKE_A_CHANCE
return PlayerAIBase.Action.PAY_TEN_POUND_FINE
def property_offered_for_auction(self, game_state, player, property):
'''
Called when a property is put up for auction.
Properties are auctioned when a player lands on an unowned square but does
not want to buy it. All players take part in the auction, including the
player who landed on the square.
The property will be sold to the highest bidder using the eBay rule,
ie, for £1 more than the second-highest bid.
Return the amount you bid. To put in a bid this must be a positive integer.
Zero means that you are not bidding (it does not mean that you are bidding
zero).
The default behaviour is not to bid.
'''
adjustment = self.property_offered_for_auction_adjustment
return property.price + self.property_offered_for_auction_adjustment # random.randint(-100, 50)
def auction_result(self, status, property, player, amount_paid):
'''
Called with the result of an auction. All players receive
this notification.
status is either AUCTION_SUCCEEDED or AUCTION_FAILED.
If the auction succeeded, the property, the player who won
the auction and the amount they paid are passed to the AI.
If the auction failed, the player will be None and the
amount paid will be 0.
No response is required.
'''
        # Idea is that we make a better offer for the auction next time if we fail this time, and vice versa.
        if player is None:
self.property_offered_for_auction_adjustment = self.property_offered_for_auction_adjustment + 1
return
# squeeze the player in auction for the best deal the next time around!
if player.name == self.get_name():
#self.p("S " + player.name + str(amount_paid))
if status == PlayerAIBase.Action.AUCTION_SUCCEEDED:
self.property_offered_for_auction_adjustment = self.property_offered_for_auction_adjustment - 1
else:
self.property_offered_for_auction_adjustment = self.property_offered_for_auction_adjustment + 1 #?
else:
#self.p("F" + player.name + str(amount_paid))
if status == PlayerAIBase.Action.AUCTION_SUCCEEDED:
self.property_offered_for_auction_adjustment = self.property_offered_for_auction_adjustment + 1
else:
self.property_offered_for_auction_adjustment = self.property_offered_for_auction_adjustment - 1 #?
pass
def build_houses(self, game_state, player):
'''
Called near the start of the player's turn to give the option of building houses.
Return a list of tuples indicating which properties you want to build houses
on and how many houses to build on each. For example:
[(park_lane, 3), (mayfair, 4)]
The properties should be Property objects.
Return an empty list if you do not want to build.
Notes:
- You must own a whole set of unmortgaged properties before you can
build houses on it.
- You can build on multiple sets in one turn. Just specify all the streets
and houses you want to build.
- Build five houses on a property to have a "hotel".
- You specify the _additional_ houses you will be building, not the
total after building. For example, if Park Lane already has 3 houses
and you specify (park_lane, 2) you will end up with 5
houses (ie, a hotel).
- Sets must end up with 'balanced' housing. No square in a set can
have more than one more house than any other. If you request an
unbalanced build, the whole transaction will be rolled back, even
if it includes balanced building on other sets as well.
- If you do not have (or cannot raise) enough money to build all the
houses specified, the whole transaction will be rolled back. Between
this function call and money being taken, you will have an opportunity
to mortgage properties or make deals.
The default behaviour is not to build.
'''
# I like Sophie's idea!
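        # Only build when we hold a comfortable cash buffer, then add one house
        # to every property of each complete, buildable set we own.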
if player.state.cash < 1000:
return []
for owned_set in player.state.owned_unmortgaged_sets:
if not owned_set.can_build_houses:
continue
return [(p, 1) for p in owned_set.properties]
return []
def sell_houses(self, game_state, player):
'''
Gives the player the option to sell properties.
This is called when any debt, fine or rent has to be paid. It is
called just before mortgage_properties (below).
Notes:
- You cannot mortgage properties with houses on them, so if you
plan to mortgage, make sure you sell all the houses first.
- For each house sold you receive half the price that they were
bought for.
- Houses on a set must end up 'balanced', ie no property can have
more than one more house than any other property in the set.
Return a list of tuples of the streets and number of houses you
want to sell. For example:
[(old_kent_road, 1), (bow_street, 1)]
The streets should be Property objects.
The default is not to sell any houses.
'''
return []
def mortgage_properties(self, game_state, player):
'''
Gives the player an option to mortgage properties.
This is called before any debt is paid (house building, rent,
tax, fines from cards etc).
Notes:
- You receive half the face value of each property mortgaged.
- You cannot mortgage properties with houses on them.
(The AI will have been given the option to sell houses before this
function is called.)
Return a list of properties to mortgage, for example:
[bow_street, liverpool_street_station]
The properties should be Property objects.
Return an empty list if you do not want to mortgage anything.
The default behaviour is not to mortgage anything.
'''
if player.state.cash > 100:
return []
property_name = self.get_property_to_propose()
for aloop in range(0, len(self.properties_we_like)):
property = game_state.board.get_square_by_name(property_name)
if(property.owner is player or property.owner is None):
# The property is either not owned, or owned by us...
property_name = self.get_property_to_propose()
property = game_state.board.get_square_by_name(property_name)
#self.p(property.name)
return [property]
return []
def unmortgage_properties(self, game_state, player):
'''
Called near the start of the player's turn to give them the
opportunity to unmortgage properties.
Unmortgaging costs half the face value plus 10%. Between deciding
to unmortgage and money being taken the player will be given the
opportunity to make deals or sell other properties. If after this
they do not have enough money, the whole transaction will be aborted,
and no properties will be unmortgaged and no money taken.
Return a list of property names to unmortgage, like:
[old_kent_road, bow_street]
The properties should be Property objects.
The default is to return an empty list, ie to do nothing.
'''
return []
def get_out_of_jail(self, game_state, player):
'''
Called in the player's turn, before the dice are rolled, if the player
is in jail.
There are three possible return values:
PlayerAIBase.Action.BUY_WAY_OUT_OF_JAIL
PlayerAIBase.Action.PLAY_GET_OUT_OF_JAIL_FREE_CARD
PlayerAIBase.Action.STAY_IN_JAIL
Buying your way out of jail will cost £50.
The default action is STAY_IN_JAIL.
'''
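        # Prefer a free card if we hold one, then buy our way out if we can
        # afford it; otherwise stay in jail.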
if self.num_jail_freecards > 0:
self.num_jail_freecards = self.num_jail_freecards -1
return PlayerAIBase.Action.PLAY_GET_OUT_OF_JAIL_FREE_CARD
if player.state.cash >=50:
return PlayerAIBase.Action.BUY_WAY_OUT_OF_JAIL
return PlayerAIBase.Action.STAY_IN_JAIL
def propose_deal(self, game_state, player):
'''
Called to allow the player to propose a deal.
You return a DealProposal object.
If you do not want to make a deal, return None.
If you want to make a deal, you provide this information:
- The player number of the player you are proposing the deal to
- A list of properties offered
- A list of properties wanted
- Maximum cash offered as part of the deal
- Minimum cash wanted as part of the deal.
Properties offered and properties wanted are passed as lists of
Property objects.
If you offer money as part of the deal, set the cash wanted to zero
and vice versa.
Note that the cash limits will not be shown to the proposed-to player.
When the deal is offered to them, they set their own limits for accepting
the deal without seeing your limits. If the limits are acceptable to both
players, the deal will be done at the halfway point.
For example, Player1 proposes:
Propose to: Player2
Properties offered: Mayfair
Properties wanted: (none)
Maximum cash offered: 0
Minimum cash wanted: 500
Player2 accepts with these limits:
Maximum cash offered: 1000
Minimum cash wanted: 0
The deal will be done with Player2 receiving Mayfair and paying £750
to Player1.
The only 'negotiation' is in the managing of cash along with the deal
as discussed above. There is no negotiation about which properties are
part of the deal. If a deal is rejected because it does not contain the
right properties, another deal can be made at another time with different
lists of properties.
Example construction and return of a DealProposal object:
return DealProposal(
propose_to_player_number=2,
properties_offered=[vine_street, bow_street],
properties_wanted=[park_lane],
maximum_cash_offered=200)
The default is for no deal to be proposed.
'''
# systematically propose a deal one by one
property_name = self.get_property_to_propose()
for aloop in range(0, len(self.properties_we_like)):
property = game_state.board.get_square_by_name(property_name)
if(property.owner is player or property.owner is None):
# The property is either not owned, or owned by us...
property_name = self.get_property_to_propose()
property = game_state.board.get_square_by_name(property_name)
price_offered = property.price * 1.5
if player.state.cash > price_offered:
return DealProposal(
properties_wanted=[property],
maximum_cash_offered=price_offered,
propose_to_player=property.owner)
#self.p(property_to_propose_deal)
return None
    # Rotates to the next property we want. Yes! It's wacky.
def get_property_to_propose(self):
property_to_propose_deal = self.properties_we_like_current
index = self.properties_we_like.index(property_to_propose_deal)+1
if index > len(self.properties_we_like)-1:
index = 0
self.properties_we_like_current = self.properties_we_like[index]
return property_to_propose_deal
def deal_proposed(self, game_state, player, deal_proposal):
'''
Called when another player proposes a deal to you.
See propose_deal (above) for details of the DealProposal object.
Return a DealResponse object.
To reject a deal:
return DealResponse(DealResponse.Action.REJECT)
To accept a deal:
return DealResponse(DealResponse.Action.ACCEPT, maximum_cash_offered=300)
or
return DealResponse(DealResponse.Action.ACCEPT, minimum_cash_wanted=800)
The default is to reject the deal.
'''
#return DealResponse(DealResponse.Action.REJECT)
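        # Accept only deals where the requested properties' total face value
        # exceeds 1000, and demand roughly a 2.1x cash premium in return.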
total_price = 0
for p in deal_proposal.properties_wanted:
total_price = total_price + p.price
if total_price > 1000:
return DealResponse(
action=DealResponse.Action.ACCEPT,
minimum_cash_wanted= total_price * 2.1)
return DealResponse(DealResponse.Action.REJECT)
def deal_result(self, deal_info):
'''
Called when a proposed deal has finished. The players involved in
the deal receive this notification.
        deal_info is a PlayerAIBase.DealInfo 'enum' indicating
whether the deal succeeded, and if not why not.
No response is required.
'''
#self.p("deal = " + str(deal_info))
pass
def deal_completed(self, deal_result):
'''
Called when a deal has successfully completed to let all
players know the details of the deal which took place.
deal_result is a DealResult object.
Note that the cash_transferred_from_proposer_to_proposee in
the deal_result can be negative if cash was transferred from
the proposee to the proposer.
No response is required.
'''
pass
def player_went_bankrupt(self, player):
'''
Called when a player goes bankrupt.
All non-bankrupt players receive this notification.
player is a Player object.
No response is required.
'''
pass
def player_ran_out_of_time(self, player):
'''
Called when a player is removed from the game because
they ran out of processing time.
All non-bankrupt players receive this notification.
player is a Player object.
No response is required.
'''
pass
def game_over(self, winner, maximum_rounds_played):
'''
Called when the game is over.
All players receive this notification.
winner is the winning player (a Player object) or None if the
game was drawn.
maximum_rounds_played is True if the game went to the round-limit.
No response is required.
'''
#self.p("turns = " + str(self.turn_count))
pass
def ai_error(self, message):
'''
Called if the return value from any of the Player AI functions
        was invalid, for example if it was not of the expected type.
No response is required.
'''
pass
def eminent_domain(self, game_state, player):
'''
Called when the eminent-domain rule is being played.
This rule is invoked in 'boring' games at round 200 if no
player has built any houses. All properties are compulsorily
repurchased by the bank and then immediately auctioned.
This method is called after the repurchase, but before
the auction.
No response is necessary.
'''
pass
| mit | -4,966,430,030,353,533,000 | 33.268908 | 114 | 0.605836 | false | 4.112545 | false | false | false |
proyan/sot-torque-control | python/dynamic_graph/sot/torque_control/identification/identify_motor_acc.py | 1 | 2771 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 18:47:50 2017
@author: adelpret
"""
from scipy import signal
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
from identification_utils import solve1stOrderLeastSquare
def identify_motor_acc(dt, dq, ddq, current, tau, Kt_p, Kv_p, ZERO_VELOCITY_THRESHOLD_SMALL,
ZERO_JERK_THRESHOLD, SHOW_THRESHOLD_EFFECT):
#Filter current*****************************************************
win = signal.hann(10)
filtered_current = signal.convolve(current, win, mode='same') / sum(win)
current = filtered_current
# Mask valid data***************************************************
#~ # remove high jerk
dddq = np.gradient(ddq,1)/dt
    maskConstAcc = (abs(dddq) < ZERO_JERK_THRESHOLD)
#~ # erode to get only steady phases where acceleration is constant
maskConstAcc=ndimage.morphology.binary_erosion(maskConstAcc,None,100)
maskPosVel=(dq> ZERO_VELOCITY_THRESHOLD_SMALL)
maskNegVel=(dq<-ZERO_VELOCITY_THRESHOLD_SMALL)
maskConstPosAcc=np.logical_and( maskConstAcc ,maskPosVel )
maskConstNegAcc=np.logical_and( maskConstAcc ,maskNegVel )
if SHOW_THRESHOLD_EFFECT :
plt.figure()
plt.plot(ddq); plt.ylabel('ddq')
ddq_const=ddq.copy()
ddq_const[np.logical_not(maskConstAcc)]=np.nan
plt.plot(ddq_const); plt.ylabel('ddq_const')
plt.show()
#~ y = a. x + b
#~ i-Kt.tau-Kv.dq = Ka.ddq + Kf
#~
# Identification ***************************************************
y = current-Kt_p*tau - Kv_p*dq
y[maskConstPosAcc] = current[maskConstPosAcc]-Kt_p*tau[maskConstPosAcc] - Kv_p*dq[maskConstPosAcc]
y[maskConstNegAcc] = current[maskConstNegAcc]-Kt_p*tau[maskConstNegAcc] - Kv_p*dq[maskConstNegAcc]
y_label = r'$i(t)-{K_t}{\tau(t)}-{K_v}{\dot{q}(t)}$'
x = ddq
x_label = r'$\ddot{q}(t)$'
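    # Fit y = Ka*ddq + Kf separately on the positive- and negative-velocity
    # samples; the intercept of the negative-velocity fit is stored as -Kfn.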
(Kap,Kfp)=solve1stOrderLeastSquare(x[maskConstPosAcc],y[maskConstPosAcc])
(Kan,b)=solve1stOrderLeastSquare(x[maskConstNegAcc],y[maskConstNegAcc])
Kfn=-b
# Plot *************************************************************
plt.figure()
plt.axhline(0, color='black',lw=1)
plt.axvline(0, color='black',lw=1)
plt.plot(x ,y ,'.' ,lw=3,markersize=1,c='0.5');
plt.plot(x[maskConstPosAcc],y[maskConstPosAcc],'rx',lw=3,markersize=1);
plt.plot(x[maskConstNegAcc],y[maskConstNegAcc],'bx',lw=3,markersize=1);
#plot identified lin model
plt.plot([min(x),max(x)],[Kap*min(x)+Kfp ,Kap*max(x)+Kfp],'g:',lw=3)
plt.plot([min(x),max(x)],[Kan*min(x)-Kfn ,Kan*max(x)-Kfn],'g:',lw=3)
plt.ylabel(y_label)
plt.xlabel(x_label)
plt.show()
return (Kap, Kan, Kfp, Kfn) | gpl-3.0 | -7,318,730,382,067,557,000 | 39.173913 | 102 | 0.591483 | false | 2.901571 | false | false | false |
hydroshare/hydroshare_temp | hs_party/forms/person.py | 1 | 3624 | __author__ = 'valentin'
#from mezzanine.forms.models import Form
from django.forms import ModelForm, Textarea
from django import forms
from django.forms.models import inlineformset_factory,modelformset_factory,BaseModelFormSet
from ..models.organization import Organization
from ..models.person import Person,PersonLocation,PersonExternalIdentifier,\
PersonPhone,PersonEmail,OtherName
from ..models.organization_association import OrganizationAssociation
from .organization_association import OrganizationAssociationEditorForm
from django.contrib.auth.models import User, Group
from django.contrib.auth.forms import UserCreationForm
from django.utils.translation import ugettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout
from crispy_forms.bootstrap import TabHolder, Tab
class PersonCreateForm(ModelForm):
class Meta:
model = Person
fields = ( 'name','givenName','familyName','primaryOrganizationRecord',
'jobTitle','notes','url',
# 'primaryAddress',"primaryTelephone"
)
widgets = {
'notes': Textarea(attrs={'cols': 80, 'rows': 6}),
}
labels = {
'notes': _('Short Bio'),
'name': _('Full Name of Person (must be unique)'),
'primaryOrganizationRecord': _('Select Primary Organization'),
'givenName': _('First or Given Name'),
'familyName': _('Last or Family Name'),
}
help_texts = {
            'notes': _('Short Biography discussing your work and interests.'),
'name': _('Full Name of Person that will be displayed on the site. Must be unique.'),
}
# initial form
class PersonEditorForm(ModelForm):
class Meta:
model = Person
fields = ( 'name','givenName','familyName','primaryOrganizationRecord',
'jobTitle','notes','url',
# 'primaryAddress',"primaryTelephone"
)
widgets = {
'notes': Textarea(attrs={'cols': 80, 'rows': 6}),
}
labels = {
'notes': _('Short Bio'),
'name': _('Full Name of Person (must be unique)'),
'primaryOrganizationRecord': _('Select Primary Organization'),
}
help_texts = {
            'notes': _('Short Biography discussing your work and interests.'),
'name': _('Full Name of Person that will be displayed on the site. Must be unique.'),
}
pass
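# Inline formsets used by the person editor page to manage related records
# (locations, emails, phones, alternate names, external identifiers and
# organization associations) alongside the main Person form.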
LocationFormSet = inlineformset_factory(
Person,
PersonLocation,
extra=2,)
EmailFormSet = inlineformset_factory(
Person,
PersonEmail,
extra=2,)
PhoneFormSet = inlineformset_factory(
Person,
PersonPhone,
extra=2,)
NameFormSet = inlineformset_factory(
Person,
OtherName,
extra=2,)
IdentifierFormSet = inlineformset_factory(
Person,
PersonExternalIdentifier,
extra=2,)
OrgAssociationsFormSet = inlineformset_factory(
Person,
Organization.persons.through,
#Person.organizations.through,
extra=2)
# class OrganizationAssociationFormset(BaseModelFormSet):
# def __init__(self, *args, **kwargs):
# super(OrganizationAssociationFormset, self).__init__(*args, **kwargs)
# self.queryset = OrganizationAssociation.objects.filter(name__startswith='O')
# OrgAssociationsFormSet = modelformset_factory(
# OrganizationAssociation,
# # form=OrganizationAssociationEditorForm,
# extra=2)
# class PersonForm(ModelForm):
# class Meta:
# model = Person
# fields ={"givenName","familyName","name",}
#
# pass | bsd-3-clause | -8,476,676,534,957,222,000 | 31.079646 | 97 | 0.648179 | false | 4.288757 | false | false | false |
ivanfilippov/PowerDNS-Admin | create_db.py | 1 | 2745 | #!/usr/bin/env python
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from app import db
from app.models import Role, Setting
import os.path
import time
import sys
def start():
wait_time = get_waittime_from_env()
if not connect_db(wait_time):
print("ERROR: Couldn't connect to database server")
exit(1)
init_records()
def get_waittime_from_env():
return int(os.environ.get('WAITFOR_DB', 1))
def connect_db(wait_time):
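    # Retry once per second, up to wait_time attempts, until the database
    # schema can be created.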
for i in xrange(0, wait_time):
print("INFO: Wait for database server")
sys.stdout.flush()
try:
db.create_all()
return True
except:
time.sleep(1)
return False
def init_roles(db, role_names):
# Get key name of data
name_of_roles = map(lambda r: r.name, role_names)
# Query to get current data
rows = db.session.query(Role).filter(Role.name.in_(name_of_roles)).all()
name_of_rows = map(lambda r: r.name, rows)
# Check which data that need to insert
roles = filter(lambda r: r.name not in name_of_rows, role_names)
# Insert data
for role in roles:
db.session.add(role)
def init_settings(db, setting_names):
# Get key name of data
name_of_settings = map(lambda r: r.name, setting_names)
# Query to get current data
rows = db.session.query(Setting).filter(Setting.name.in_(name_of_settings)).all()
# Check which data that need to insert
name_of_rows = map(lambda r: r.name, rows)
settings = filter(lambda r: r.name not in name_of_rows, setting_names)
# Insert data
for setting in settings:
db.session.add(setting)
def init_records():
# Create initial user roles and turn off maintenance mode
init_roles(db, [
Role('Administrator', 'Administrator'),
Role('User', 'User')
])
init_settings(db, [
Setting('maintenance', 'False'),
Setting('fullscreen_layout', 'True'),
Setting('record_helper', 'True'),
Setting('login_ldap_first', 'True'),
Setting('default_record_table_size', '15'),
Setting('default_domain_table_size', '10'),
Setting('auto_ptr','False')
])
db_commit = db.session.commit()
commit_version_control(db_commit)
def commit_version_control(db_commit):
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
elif db_commit is not None:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))
if __name__ == '__main__':
start()
| mit | 6,372,247,686,023,421,000 | 27.894737 | 115 | 0.647359 | false | 3.422693 | false | false | false |
RyanChinSang/ECNG3020-ORSS4SCVI | BETA/dev02/test3.py | 1 | 4947 | from __future__ import print_function
from threading import Thread
import sys
import cv2
import numpy as np
import queue
from BETA.dev02.test2 import avg_color
from BETA.dev02.test4 import t2s_say
class VideoStream:
def __init__(self, src=None, height=None, width=None, ratio=None):
cv2.setUseOptimized(True)
if src is None:
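            # No source given: probe indexes 0-9 and use the first camera that
            # opens, reporting any other working cameras that were found.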
camera_list = []
for i in range(10):
cap = cv2.VideoCapture(i)
if cap.isOpened():
camera_list += [i]
cap.release()
if len(camera_list) == 1:
src = camera_list[0]
elif len(camera_list) == 0:
src = -1
print('NOTICE: There were no detected working cameras for indexes 0 to 10!')
else:
src = camera_list[0]
msg = 'NOTICE: There are ' + str(len(camera_list) - 1) \
+ ' other operational camera source(s) available: ' + str(camera_list[1:])
print(msg.replace('are', 'is')) if len(camera_list) - 1 == 1 else print(msg)
self.avg = np.array([])
self.freq = cv2.getTickFrequency()
self.begin = 0
self.stream = cv2.VideoCapture(src)
self.config(dim=None, height=height, width=width, ratio=ratio)
(self.grabbed, self.frame) = self.stream.read()
self.released = not self.grabbed
def start(self):
if sys.version[0] == '3':
Thread(target=self.update, args=(), daemon=True).start()
else:
Thread(target=self.update, args=()).start()
return self
def update(self):
while True:
if self.released:
return
(self.grabbed, self.frame) = self.stream.read()
def read(self, width=None, height=None, ratio=None):
self.begin = cv2.getTickCount()
return (not self.released), self.resize(frame=self.frame, width=width, height=height, ratio=ratio)
def release(self):
self.stream.release()
self.released = True
def isOpened(self):
return not self.released
def fps(self):
self.avg = np.append(self.avg, (self.freq / (cv2.getTickCount() - self.begin)))
return self.avg[-1]
def avg_fps(self):
self.avg = np.append(self.avg, (self.freq / (cv2.getTickCount() - self.begin)))
return self.avg.mean()
def config(self, dim, height, width, ratio):
if ratio is None:
if height and width:
dim = (height, (height * float(width / height)))
elif not height and not width:
pass
else:
print('WARNING: Insufficient configuration parameters. The default was used.')
else:
if height:
dim = (height, (height * float(ratio)))
elif width:
dim = ((width / float(ratio)), width)
if dim:
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, self.round_up(dim[0]))
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, self.round_up(dim[1]))
def resize(self, frame, width, height, ratio):
dim = (dheight, dwidth) = frame.shape[:2]
if ratio is None:
if width and height:
dim = (height, width)
elif width and height is None:
dim = ((dheight * (width / dwidth)), width)
elif width is None and height:
dim = (height, (dwidth * (height / dheight)))
else:
if width is None and height is None:
dim = (dheight, (dheight * ratio))
elif width is None and height:
dim = (height, (height * ratio))
elif width and height is None:
dim = ((width / ratio), width)
else:
if (width / height) == ratio:
dim = (height, width)
else:
print('WARNING: Window resolution (' + str(width) + '*' + str(height)
+ ') does not agree with ratio ' + str(ratio) + '. The default was used.')
return cv2.resize(frame, (self.round_up(dim[1]), self.round_up(dim[0])), interpolation=cv2.INTER_AREA)
@staticmethod
def round_up(num):
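        # Ceiling without math.ceil: negate, floor-divide by 1, negate again.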
return int(-(-num // 1))
if __name__ == '__main__':
q = queue.Queue()
size = 20
cap = VideoStream().start()
init_frame = cap.read()[1]
frame_height, frame_width = init_frame.shape[:2]
while cap.isOpened():
ret, frame = cap.read()
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
cap.release()
Thread(target=avg_color, args=(frame, size, frame_height, frame_width, q)).start()
Thread(target=t2s_say, args=(q.get(), q)).start()
cv2.destroyAllWindows()
'''
To add:
1- Fixes (see below)
To fix:
1- See #1.NB[1-3] in BETA.TestCode.OpenCV.VideoCap3.py
v1.6
''' | gpl-3.0 | -4,433,979,133,669,497,000 | 34.092199 | 110 | 0.538508 | false | 3.719549 | false | false | false |
wangtaoking1/hummer | backend/kubernetes/k8sclient.py | 1 | 9953 | import json
import requests
import logging
from backend.kubernetes.namespace import Namespace
from backend.kubernetes.replicationcontroller import Controller
from backend.kubernetes.service import Service
from backend.kubernetes.volume import PersistentVolume, PersistentVolumeClaim
from backend.kubernetes.autoscaler import AutoScaler
logger = logging.getLogger('hummer')
class KubeClient(object):
"""
Kubernetes simple python client.
API: http://kubernetes.io/third_party/swagger-ui/
"""
_base_url = None
def __init__(self, base_url):
self._base_url = self.add_slash(base_url)
@classmethod
def add_slash(self, url):
"""
Promote that the base url ends with '/'.
"""
if url.endswith('/'):
return url
return url + '/'
@property
def base_url(self):
return self._base_url
def _send_request(self, method, path, label=None, query=None, body=None):
"""
Send requests to k8s server and get the response.
Returns a response dict.
Parameters:
            label: str, e.g. "app=name"
            query: str, e.g. "tailLines=10"
"""
url = self._base_url + path
if label:
url = '{}?labelSelector={}'.format(url, label)
if query:
url = url + '?' + query
kwargs = {}
if body:
kwargs['data'] = json.dumps(body)
try:
res = getattr(requests, method.lower())(url, **kwargs)
except Exception as error:
logger.error(error)
            logger.error("Request to %s failed", url)
return None
try:
response = json.loads(res.text)
return response
except Exception as error:
return res.text
def list_nodes(self):
"""
List all nodes.
"""
res = self._send_request('GET', 'nodes')
nodes = []
for item in res.get('items'):
nodes.append(item['metadata']['name'])
return nodes
def list_namespces(self):
"""
List all namespaces.
"""
res = self._send_request('GET', 'namespaces')
namespaces = []
for item in res.get('items'):
namespaces.append(item['metadata']['name'])
return namespaces
def create_namespace(self, name):
"""
Create namespace called name.
"""
namespace = Namespace(name)
response = self._send_request('POST', 'namespaces', body=namespace.body)
return self._is_creating_deleting_successful(response)
def delete_namespace(self, name):
"""
Delete namespace called name.
"""
response = self._send_request('DELETE', 'namespaces/{}'.format(name))
return self._is_creating_deleting_successful(response)
def create_persistentvolume(self, namespace, name, capacity, nfs_path,
nfs_server):
"""
Create persistentvolume called namespace-name.
"""
volume_name = namespace + '-' + name
volume = PersistentVolume(volume_name, capacity, nfs_path, nfs_server)
response = self._send_request('POST', 'persistentvolumes',
body=volume.body)
return self._is_creating_deleting_successful(response)
def delete_persistentvolume(self, namespace, name):
"""
Delete persistentvolume called namespace-name.
"""
volume_name = namespace + '-' + name
response = self._send_request('DELETE', 'persistentvolumes/{}'.format(
volume_name))
return self._is_creating_deleting_successful(response)
def create_persistentvolumeclaim(self, namespace, name, capacity):
"""
Create persistentvolumeclaim called name.
"""
volume_name = namespace + '-' + name
volumeclaim = PersistentVolumeClaim(volume_name, capacity)
response = self._send_request('POST',
'namespaces/{}/persistentvolumeclaims'.format(namespace),
body=volumeclaim.body)
return self._is_creating_deleting_successful(response)
def delete_persistentvolumeclaim(self, namespace, name):
"""
Delete persistentvolumeclaim called name.
"""
volume_name = namespace + '-' + name
response = self._send_request('DELETE',
'namespaces/{}/persistentvolumeclaims/{}'.format(namespace,
volume_name))
return self._is_creating_deleting_successful(response)
def list_controllers(self, namespace):
"""
List all replicationcontroller in the namespace.
"""
path = 'namespaces/{}/replicationcontrollers'.format(namespace)
res = self._send_request('GET', path)
controllers = []
for item in res.get('items'):
controllers.append(item['metadata']['name'])
return controllers
def create_controller(self, namespace, name, image_name, cpu, memory,
replicas=1, tcp_ports=None, udp_ports=None, commands=None, args=None,
envs=None, volumes=None):
"""
Create a replicationcontroller.
"""
controller = Controller(name, image_name, cpu, memory, replicas,
tcp_ports, udp_ports, commands, args, envs, volumes)
path = 'namespaces/{}/replicationcontrollers'.format(namespace)
# logger.debug(controller.body)
response = self._send_request('POST', path, body=controller.body)
return self._is_creating_deleting_successful(response)
def delete_controller(self, namespace, name):
"""
Delete a replicationcontroller.
"""
path = 'namespaces/{}/replicationcontrollers/{}'.format(namespace, name)
response = self._send_request('DELETE', path)
return self._is_creating_deleting_successful(response)
def list_pods(self, namespace, label=None):
"""
List pods by label.
Parameters:
label: str, "app=name"
"""
path = 'namespaces/{}/pods/'.format(namespace)
response = self._send_request('GET', path, label=label)
# logger.debug(response)
pods = []
for item in response.get('items'):
pods.append(item['metadata']['name'])
return pods
def list_host_ips(self, namespace, label=None):
"""
List all host ips for a controller.
Parameters:
label: str, "app=name"
"""
path = 'namespaces/{}/pods/'.format(namespace)
response = self._send_request('GET', path, label=label)
# logger.debug(response)
hosts = set()
for pod in response.get('items', []):
hosts.add(pod['spec']['nodeName'])
return list(hosts)
def delete_pod(self, namespace, name):
"""
Delete a pod.
"""
path = 'namespaces/{}/pods/{}'.format(namespace, name)
response = self._send_request('DELETE', path)
return self._is_creating_deleting_successful(response)
def list_services(self, namespace):
"""
List all services in the namespace.
"""
path = 'namespaces/{}/services'.format(namespace)
res = self._send_request('GET', path)
services = []
for item in res.get('items'):
services.append(item['metadata']['name'])
return services
def create_service(self, namespace, name, tcp_ports=None, udp_ports=None,
is_public=False, session_affinity=False):
"""
Create a service in namespace.
"""
service = Service(name, tcp_ports, udp_ports, is_public,
session_affinity)
path = 'namespaces/{}/services'.format(namespace)
# logger.debug(service.body)
response = self._send_request('POST', path, body=service.body)
return self._is_creating_deleting_successful(response)
def delete_service(self, namespace, name):
"""
Delete a service.
"""
path = 'namespaces/{}/services/{}'.format(namespace, name)
response = self._send_request('DELETE', path)
return self._is_creating_deleting_successful(response)
def get_service_details(self, namespace, name):
"""
Get the details of a service.
"""
path = 'namespaces/{}/services/{}'.format(namespace, name)
response = self._send_request('GET', path)
return response
def _is_creating_deleting_successful(self, response):
"""
        Check the response to determine whether creating or deleting the
        resource succeeded.
"""
status = response['status']
if isinstance(status, str) and status == 'Failure':
logger.debug(response['message'])
return False
return True
def get_logs_of_pod(self, namespace, pod_name, tail_line):
"""
        Return the last tail_line lines of the logs of the pod named pod_name.
"""
path = 'namespaces/{}/pods/{}/log'.format(namespace, pod_name)
query = "tailLines=" + str(tail_line)
response = self._send_request('GET', path, query=query)
return response.split('\n')
def create_autoscaler(self, namespace, name, minReplicas=-1, maxReplicas=-1,
cpu_target=-1):
"""
Create an autoscaler name in namespace namespace.
"""
scaler = AutoScaler(name, minReplicas, maxReplicas, cpu_target)
path = 'namespaces/{}/horizontalpodautoscalers'.format(namespace)
# logger.debug(scaler.body)
response = self._send_request('POST', path, body=scaler.body)
return self._is_creating_deleting_successful(response)
def delete_autoscaler(self, namespace, name):
"""
Delete the autoscaler name.
"""
path = 'namespaces/{}/horizontalpodautoscalers/{}'.format(namespace,
name)
response = self._send_request('DELETE', path)
return self._is_creating_deleting_successful(response)
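
# Illustrative usage (hypothetical API server URL; the relative paths used
# above, e.g. 'namespaces', are appended to it, so it should end at the API
# version):
#   client = KubeClient('http://127.0.0.1:8080/api/v1/')
#   client.create_namespace('demo')
#   print(client.list_pods('demo', label='app=demo'))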
| apache-2.0 | -1,566,903,951,912,053,800 | 32.969283 | 80 | 0.594394 | false | 4.384581 | false | false | false |
martijnvermaat/rpclib | src/rpclib/util/etreeconv.py | 1 | 3585 |
#
# rpclib - Copyright (C) Rpclib contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
"""This module contains the utility methods that convert an ElementTree
hierarchy to python dicts and vice versa.
"""
from lxml import etree
from rpclib.util.odict import odict
def root_dict_to_etree(d):
"""Converts a dictionary to an xml hiearchy. Just like a valid xml document,
the dictionary must have a single element. The format of the child
dictionaries is the same as :func:`dict_to_etree`.
"""
assert len(d) == 1
key, = d.keys()
retval = etree.Element(key)
for val in d.values():
break
if isinstance(val, dict) or isinstance(val, odict):
dict_to_etree(val, retval)
else:
for a in val:
dict_to_etree(a, retval)
return retval
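# For illustration: root_dict_to_etree({'root': {'child': ['a', 'b']}}) yields
# an element tree equivalent to <root><child>a</child><child>b</child></root>.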
def dict_to_etree(d, parent):
"""Takes a the dict whose value is either None or an instance of dict, odict
or an iterable. The iterables can contain either other dicts/odicts or
str/unicode instances.
"""
for k, v in d.items():
if v is None or len(v) == 0:
etree.SubElement(parent, k)
elif isinstance(v, dict) or isinstance(v, odict):
child = etree.SubElement(parent, k)
dict_to_etree(v, child)
else:
for e in v:
child=etree.SubElement(parent, k)
if isinstance(e, dict) or isinstance(e, odict):
dict_to_etree(e, child)
else:
child.text=str(e)
def root_etree_to_dict(element, iterable=(list, list.append)):
"""Takes an xml root element and returns the corresponding dict. The second
argument is a pair of iterable type and the function used to add elements to
the iterable. The xml attributes are ignored.
"""
return {element.tag: iterable[0]([etree_to_dict(element, iterable)])}
def etree_to_dict(element, iterable=(list, list.append)):
"""Takes an xml root element and returns the corresponding dict. The second
argument is a pair of iterable type and the function used to add elements to
the iterable. The xml attributes are ignored.
"""
if (element.text is None) or element.text.isspace():
retval = odict()
for elt in element:
if not (elt.tag in retval):
retval[elt.tag] = iterable[0]()
iterable[1](retval[elt.tag], etree_to_dict(elt, iterable))
else:
retval = element.text
return retval
def etree_strip_namespaces(element):
"""Removes any namespace information form the given element recursively."""
retval = etree.Element(element.tag.rpartition('}')[-1])
retval.text = element.text
for a in element.attrib:
retval.attrib[a.rpartition('}')[-1]] = element.attrib[a]
for e in element:
retval.append(etree_strip_namespaces(e))
return retval
| lgpl-2.1 | 138,900,269,373,878,800 | 32.194444 | 80 | 0.660251 | false | 4.005587 | false | false | false |
prymitive/upaas-admin | upaas_admin/apps/applications/models.py | 1 | 31729 | # -*- coding: utf-8 -*-
"""
:copyright: Copyright 2013-2014 by Łukasz Mierzwa
:contact: [email protected]
"""
from __future__ import unicode_literals
import os
import datetime
import logging
import tempfile
import shutil
import time
import re
from copy import deepcopy
from mongoengine import (Document, DateTimeField, StringField, LongField,
ReferenceField, ListField, DictField, QuerySetManager,
BooleanField, IntField, NULLIFY, signals)
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.conf import settings
from upaas import utils
from upaas import tar
from upaas.checksum import calculate_file_sha256, calculate_string_sha256
from upaas.config.base import UPAAS_CONFIG_DIRS
from upaas.config.metadata import MetadataConfig
from upaas.storage.exceptions import StorageError
from upaas import processes
from upaas.utils import load_handler
from upaas_admin.apps.servers.models import RouterServer, BackendServer
from upaas_admin.apps.scheduler.models import ApplicationRunPlan
from upaas_admin.apps.applications.exceptions import UnpackError
from upaas_admin.apps.scheduler.base import Scheduler
from upaas_admin.apps.tasks.constants import TaskStatus
from upaas_admin.apps.tasks.models import Task
from upaas_admin.apps.applications.constants import (
NeedsBuildingFlag, NeedsStoppingFlag, NeedsRestartFlag, IsStartingFlag,
NeedsUpgradeFlag, FLAGS_BY_NAME)
from upaas_admin.apps.applications.helpers import (
ApplicationStateHelper, ApplicationFeatureHelper)
log = logging.getLogger(__name__)
class Package(Document):
date_created = DateTimeField(required=True, default=datetime.datetime.now)
metadata = StringField(help_text=_('Application metadata'))
application = ReferenceField('Application', dbref=False, required=True)
task = ReferenceField(Task, dbref=False)
interpreter_name = StringField(required=True)
interpreter_version = StringField(required=True)
parent = StringField()
parent_package = ReferenceField('Package')
filename = StringField()
bytes = LongField(required=True)
checksum = StringField(required=True)
builder = StringField(required=True)
distro_name = StringField(required=True)
distro_version = StringField(required=True)
distro_arch = StringField(required=True)
revision_id = StringField()
revision_author = StringField()
revision_date = DateTimeField()
revision_description = StringField()
revision_changelog = StringField()
ack_filename = '.upaas-unpacked'
meta = {
'indexes': ['filename'],
'ordering': ['-date_created'],
}
_default_manager = QuerySetManager()
@classmethod
def pre_delete(cls, sender, document, **kwargs):
log.debug(_("Pre delete signal on package {id}").format(
id=document.safe_id))
Application.objects(id=document.application.id).update_one(
pull__packages=document.id)
document.delete_package_file(null_filename=False)
@property
def safe_id(self):
return str(self.id)
@property
def metadata_config(self):
if self.metadata:
return MetadataConfig.from_string(self.metadata)
return {}
@property
def upaas_config(self):
return settings.UPAAS_CONFIG
@property
def package_path(self):
"""
Unpacked package directory path
"""
return os.path.join(settings.UPAAS_CONFIG.paths.apps, self.safe_id)
@property
def ack_path(self):
return os.path.join(self.package_path, self.ack_filename)
def delete_package_file(self, null_filename=True):
log.debug(_("Deleting package file for {pkg}").format(
pkg=self.safe_id))
if not self.filename:
log.debug(_("Package {pkg} has no filename, skipping "
"delete").format(pkg=self.safe_id))
return
storage = load_handler(self.upaas_config.storage.handler,
self.upaas_config.storage.settings)
if not storage:
log.error(_("Storage handler '{handler}' not found, cannot "
"package file").format(
handler=self.upaas_config.storage.handler))
return
log.debug(_("Checking if package file {path} is stored").format(
path=self.filename))
if storage.exists(self.filename):
log.info(_("Removing package {pkg} file from storage").format(
pkg=self.safe_id))
storage.delete(self.filename)
if null_filename:
log.info(_("Clearing filename for package {pkg}").format(
pkg=self.safe_id))
del self.filename
self.save()
def uwsgi_options_from_metadata(self):
"""
Parse uWSGI options in metadata (if any) and return only allowed.
"""
options = []
compiled = []
for regexp in self.upaas_config.apps.uwsgi.safe_options:
compiled.append(re.compile(regexp))
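        # Keep an option only when its name (the text before '=') matches one
        # of the configured safe_options patterns.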
for opt in self.metadata_config.uwsgi.settings:
if '=' in opt:
for regexp in compiled:
opt_name = opt.split('=')[0].rstrip(' ')
if regexp.match(opt_name):
options.append(opt)
log.debug(_("Adding safe uWSGI option from metadata: "
"{opt}").format(opt=opt))
break
return options
def generate_uwsgi_config(self, backend_conf):
"""
:param backend_conf: BackendRunPlanSettings instance for which we
generate config
"""
def _load_template(path):
log.debug("Loading uWSGI template from: %s" % path)
for search_path in UPAAS_CONFIG_DIRS:
template_path = os.path.join(search_path, path)
if os.path.exists(template_path):
f = open(template_path)
ret = f.read().splitlines()
f.close()
return ret
return []
# so it won't change while generating configuration
config = deepcopy(self.upaas_config)
base_template = config.interpreters['uwsgi']['template']
template = None
try:
template_any = config.interpreters[self.interpreter_name]['any'][
'uwsgi']['template']
except (AttributeError, KeyError):
pass
else:
if template_any:
template = template_any
try:
template_version = config.interpreters[self.interpreter_name][
self.interpreter_version]['uwsgi']['template']
except (AttributeError, KeyError):
pass
else:
if template_version:
template = template_version
max_memory = backend_conf.workers_max
max_memory *= self.application.run_plan.memory_per_worker
max_memory *= 1024 * 1024
variables = {
'namespace': self.package_path,
'chdir': config.apps.home,
'socket': '%s:%d' % (backend_conf.backend.ip, backend_conf.socket),
'stats': '%s:%d' % (backend_conf.backend.ip, backend_conf.stats),
'uid': config.apps.uid,
'gid': config.apps.gid,
'app_name': self.application.name,
'app_id': self.application.safe_id,
'pkg_id': self.safe_id,
'max_workers': backend_conf.workers_max,
'max_memory': max_memory,
'memory_per_worker': self.application.run_plan.memory_per_worker,
'max_log_size':
self.application.run_plan.max_log_size * 1024 * 1024,
}
if config.apps.graphite.carbon:
variables['carbon_servers'] = ' '.join(
config.apps.graphite.carbon)
variables['carbon_timeout'] = config.apps.graphite.timeout
variables['carbon_frequency'] = config.apps.graphite.frequency
variables['carbon_max_retry'] = config.apps.graphite.max_retry
variables['carbon_retry_delay'] = config.apps.graphite.retry_delay
variables['carbon_root'] = config.apps.graphite.root
try:
variables.update(config.interpreters[self.interpreter_name]['any'][
'uwsgi']['vars'])
except (AttributeError, KeyError):
pass
try:
variables.update(config.interpreters[self.interpreter_name][
self.interpreter_version]['uwsgi']['vars'])
except (AttributeError, KeyError):
pass
        # interpreter default settings for any version
try:
for key, value in list(config.interpreters[self.interpreter_name][
'any']['settings'].items()):
var_name = "meta_%s_%s" % (self.interpreter_name, key)
variables[var_name] = value
except (AttributeError, KeyError):
pass
        # interpreter default settings for current version
try:
for key, value in list(config.interpreters[self.interpreter_name][
self.interpreter_version]['settings'].items()):
var_name = "meta_%s_%s" % (self.interpreter_name, key)
variables[var_name] = value
except (AttributeError, KeyError):
pass
# interpreter settings from metadata
try:
for key, val in list(
self.metadata_config.interpreter.settings.items()):
var_name = "meta_%s_%s" % (self.interpreter_name, key)
variables[var_name] = val
except KeyError:
pass
envs = {}
try:
envs.update(config.interpreters[self.interpreter_name]['any'][
'env'])
except (AttributeError, KeyError):
pass
try:
envs.update(config.interpreters[self.interpreter_name][
self.interpreter_version]['env'])
except (AttributeError, KeyError):
pass
envs.update(self.metadata_config.env)
plugin = None
try:
plugin = config.interpreters[self.interpreter_name]['any'][
'uwsgi']['plugin']
except (AttributeError, KeyError):
pass
try:
plugin = config.interpreters[self.interpreter_name][
self.interpreter_version]['uwsgi']['plugin']
except (AttributeError, KeyError):
pass
options = ['[uwsgi]']
options.append('\n# starting uWSGI config variables list')
for key, value in list(variables.items()):
options.append('var_%s = %s' % (key, value))
for feature in self.application.feature_helper.load_enabled_features():
envs = feature.update_env(self.application, envs)
options.append('\n# starting ENV variables list')
for key, value in list(envs.items()):
options.append('env = %s=%s' % (key, value))
options.append(
'env = UPAAS_SYSTEM_DOMAIN=%s' % self.application.system_domain)
if self.application.custom_domains:
options.append('env = UPAAS_CUSTOM_DOMAINS=%s' % ','.join(
[d.name for d in self.application.custom_domains]))
options.append('\n# starting options from app metadata')
for opt in self.uwsgi_options_from_metadata():
options.append(opt)
# enable cheaper mode if we have multiple workers
if backend_conf.workers_max > backend_conf.workers_min:
options.append('\n# enabling cheaper mode')
options.append('cheaper = %d' % backend_conf.workers_min)
options.append('\n# starting base template')
options.extend(_load_template(base_template))
if config.apps.graphite.carbon:
options.append('\n# starting carbon servers block')
for carbon in config.apps.graphite.carbon:
options.append('carbon = %s' % carbon)
options.append('\n# starting interpreter plugin')
if plugin:
options.append('plugin = %s' % plugin)
options.append('\n# starting interpreter template')
options.extend(_load_template(template))
options.append('\n# starting subscriptions block')
for router in RouterServer.objects(is_enabled=True):
options.append('subscribe2 = server=%s:%d,key=%s' % (
router.subscription_ip, router.subscription_port,
self.application.system_domain))
for domain in self.application.custom_domains:
options.append('subscribe2 = server=%s:%d,key=%s' % (
router.subscription_ip, router.subscription_port,
domain.name))
options.append('\n')
for feature in self.application.feature_helper.load_enabled_features():
options = feature.update_vassal(self.application, options)
options.append('\n')
return options
def check_vassal_config(self, options):
"""
Verify is there is uWSGI vassal configuration file and if it doesn't
need updating.
"""
if os.path.exists(self.application.vassal_path):
current_hash = calculate_file_sha256(self.application.vassal_path)
new_hash = calculate_string_sha256(options)
if current_hash == new_hash:
return True
return False
def save_vassal_config(self, backend):
log.info(_("Generating uWSGI vassal configuration"))
options = "\n".join(self.generate_uwsgi_config(backend))
if self.check_vassal_config(options):
log.info("Vassal is present and valid, skipping rewrite")
return
log.info(_("Saving vassal configuration to {path}").format(
path=self.application.vassal_path))
with open(self.application.vassal_path, 'w') as vassal:
vassal.write(options)
log.info(_("Vassal saved"))
def unpack(self):
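        # Fetch the stored package file, unpack it into a temporary work
        # directory, write an unpack ack marker, then move the tree into place.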
# directory is encoded into string to prevent unicode errors
directory = tempfile.mkdtemp(dir=self.upaas_config.paths.workdir,
prefix="upaas_package_").encode("utf-8")
storage = load_handler(self.upaas_config.storage.handler,
self.upaas_config.storage.settings)
if not storage:
log.error("Storage handler '%s' not "
"found" % self.upaas_config.storage.handler)
workdir = os.path.join(directory, "system")
pkg_path = os.path.join(directory, self.filename)
if os.path.exists(self.package_path):
log.error(_("Package directory already exists: {path}").format(
path=self.package_path))
raise UnpackError(_("Package directory already exists"))
log.info("Fetching package '%s'" % self.filename)
try:
storage.get(self.filename, pkg_path)
except StorageError:
log.error(_("Storage error while fetching package {name}").format(
name=self.filename))
utils.rmdirs(directory)
raise UnpackError(_("Storage error while fetching package "
"{name}").format(name=self.filename))
log.info("Unpacking package")
os.mkdir(workdir, 0o755)
if not tar.unpack_tar(pkg_path, workdir):
log.error(_("Error while unpacking package to '{workdir}'").format(
workdir=workdir))
utils.rmdirs(directory)
raise UnpackError(_("Error during package unpack"))
with open(os.path.join(workdir, self.ack_filename), 'w') as ack:
ack.write(_('Unpacked: {now}').format(now=datetime.datetime.now()))
for feature in self.application.feature_helper.load_enabled_features():
feature.after_unpack(self.application, workdir)
log.info(_("Package unpacked, moving into '{path}'").format(
path=self.package_path))
try:
shutil.move(workdir, self.package_path)
except shutil.Error as e:
log.error(_("Error while moving unpacked package to final "
"destination: e").format(e=e))
utils.rmdirs(directory, self.package_path)
raise UnpackError(_("Can't move to final directory: "
"{path}").format(path=self.package_path))
log.info(_("Package moved"))
utils.rmdirs(directory)
class ApplicationDomain(Document):
date_created = DateTimeField(required=True, default=datetime.datetime.now)
application = ReferenceField('Application', dbref=False, required=True)
name = StringField(required=True, unique=True, min_length=4, max_length=64)
validated = BooleanField()
meta = {
'indexes': ['application']
}
@property
def safe_id(self):
return str(self.id)
class FlagLock(Document):
date_created = DateTimeField(required=True, default=datetime.datetime.now)
application = ReferenceField('Application', dbref=False, required=True)
flag = StringField(required=True)
backend = ReferenceField(BackendServer, reverse_delete_rule=NULLIFY)
pid = IntField(required=True)
meta = {
'indexes': [
{'fields': ['application', 'flag', 'backend'], 'unique': True},
],
'ordering': ['-date_created'],
}
class ApplicationFlag(Document):
date_created = DateTimeField(required=True, default=datetime.datetime.now)
application = ReferenceField('Application', dbref=False, required=True)
name = StringField(required=True, unique_with='application')
options = DictField()
pending = BooleanField(default=True)
pending_backends = ListField(ReferenceField(BackendServer))
meta = {
'indexes': [
{'fields': ['name', 'application'], 'unique': True},
{'fields': ['name']},
{'fields': ['pending']},
],
'ordering': ['-date_created'],
}
@property
def title(self):
return FLAGS_BY_NAME.get(self.name).title
class Application(Document):
date_created = DateTimeField(required=True, default=datetime.datetime.now)
name = StringField(required=True, min_length=2, max_length=60,
unique_with='owner', verbose_name=_('name'))
# FIXME reverse_delete_rule=DENY for owner
owner = ReferenceField('User', dbref=False, required=True)
metadata = StringField(verbose_name=_('Application metadata'),
required=True)
current_package = ReferenceField(Package, dbref=False, required=False)
packages = ListField(ReferenceField(Package, dbref=False,
reverse_delete_rule=NULLIFY))
run_plan = ReferenceField('ApplicationRunPlan', dbref=False)
_default_manager = QuerySetManager()
meta = {
'indexes': [
{'fields': ['name', 'owner'], 'unique': True},
{'fields': ['packages']},
],
'ordering': ['name'],
}
def __init__(self, *args, **kwargs):
super(Application, self).__init__(*args, **kwargs)
self.state_helper = ApplicationStateHelper(self)
self.feature_helper = ApplicationFeatureHelper(self)
@property
def safe_id(self):
return str(self.id)
@property
def metadata_config(self):
if self.metadata:
return MetadataConfig.from_string(self.metadata)
return {}
@property
def upaas_config(self):
return settings.UPAAS_CONFIG
@property
def vassal_path(self):
"""
Application vassal config file path.
"""
return os.path.join(self.upaas_config.paths.vassals,
'%s.ini' % self.safe_id)
@property
def interpreter_name(self):
"""
Will return interpreter from current package metadata.
If no package was built interpreter will be fetched from app metadata.
If app has no metadata it will return None.
"""
if self.current_package:
return self.current_package.interpreter_name
else:
try:
return self.metadata_config.interpreter.type
except KeyError:
return None
@property
def interpreter_version(self):
"""
Will return interpreter version from current package metadata.
If no package was built interpreter will be fetched from app metadata.
If app has no metadata it will return None.
"""
if self.current_package:
return self.current_package.interpreter_version
elif self.metadata:
return utils.select_best_version(self.upaas_config,
self.metadata_config)
@property
def supported_interpreter_versions(self):
"""
Return list of interpreter versions that this app can run.
"""
if self.metadata:
return sorted(list(utils.supported_versions(
self.upaas_config, self.metadata_config).keys()), reverse=True)
@property
def can_start(self):
"""
Returns True only if package is not started but it can be.
"""
return bool(self.current_package and self.run_plan is None)
@property
def tasks(self):
"""
List of all tasks for this application.
"""
return Task.objects(application=self)
@property
def running_tasks(self):
"""
List of all running tasks for this application.
"""
return self.tasks.filter(status=TaskStatus.running)
@property
def build_tasks(self):
"""
List of all build tasks for this application.
"""
return self.tasks.filter(flag=NeedsBuildingFlag.name)
@property
def running_build_tasks(self):
"""
Returns list of running build tasks for this application.
"""
return self.build_tasks.filter(status=TaskStatus.running)
@property
def flags(self):
"""
Return list of application flags.
"""
return ApplicationFlag.objects(application=self)
@property
def system_domain(self):
"""
Returns automatic system domain for this application.
"""
return '%s.%s' % (self.safe_id, self.upaas_config.apps.domains.system)
@property
def custom_domains(self):
"""
List of custom domains assigned for this application.
"""
return ApplicationDomain.objects(application=self)
@property
def domain_validation_code(self):
"""
String used for domain ownership validation.
"""
return "upaas-app-id=%s" % self.safe_id
def get_absolute_url(self):
return reverse('app_details', args=[self.safe_id])
def build_package(self, force_fresh=False, interpreter_version=None):
q = {
'set__options__{0:s}'.format(
NeedsBuildingFlag.Options.build_fresh_package): force_fresh,
'set__options__{0:s}'.format(
NeedsBuildingFlag.Options.build_interpreter_version):
interpreter_version,
'unset__pending': True,
'upsert': True
}
ApplicationFlag.objects(application=self,
name=NeedsBuildingFlag.name).update_one(**q)
def start_application(self):
if self.current_package:
if not self.run_plan:
log.error("Trying to start '%s' without run plan" % self.name)
return
scheduler = Scheduler()
backends = scheduler.find_backends(self.run_plan)
if not backends:
log.error(_("Can't start '{name}', no backend "
"available").format(name=self.name))
self.run_plan.delete()
return
self.run_plan.update(set__backends=backends)
ApplicationFlag.objects(
application=self, name=IsStartingFlag.name).update_one(
set__pending_backends=[b.backend for b in backends],
upsert=True)
# FIXME what if there are waiting stop tasks on other backends ?
self.flags.filter(name=NeedsStoppingFlag.name).delete()
def stop_application(self):
if self.current_package:
if not self.run_plan:
return
if self.run_plan and not self.run_plan.backends:
# no backends in run plan, just delete it
self.run_plan.delete()
return
ApplicationFlag.objects(
application=self, name=NeedsStoppingFlag.name).update_one(
set__pending_backends=[
b.backend for b in self.run_plan.backends], upsert=True)
self.flags.filter(
name__in=[IsStartingFlag.name, NeedsRestartFlag.name]).delete()
def restart_application(self):
if self.current_package:
if not self.run_plan:
return
ApplicationFlag.objects(
application=self, name=NeedsRestartFlag.name).update_one(
set__pending_backends=[
b.backend for b in self.run_plan.backends], upsert=True)
def upgrade_application(self):
if self.current_package:
if not self.run_plan:
return
ApplicationFlag.objects(
application=self, name=NeedsUpgradeFlag.name).update_one(
set__pending_backends=[
b.backend for b in self.run_plan.backends], upsert=True)
def update_application(self):
if self.run_plan:
current_backends = [bc.backend for bc in self.run_plan.backends]
scheduler = Scheduler()
new_backends = scheduler.find_backends(self.run_plan)
if not new_backends:
log.error(_("Can't update '{name}', no backend "
"available").format(name=self.name))
return
updated_backends = []
for backend_conf in new_backends:
if backend_conf.backend in current_backends:
# replace backend settings with updated version
self.run_plan.update(
pull__backends__backend=backend_conf.backend)
self.run_plan.update(push__backends=backend_conf)
updated_backends.append(backend_conf.backend)
else:
# add backend to run plan if not already there
ApplicationRunPlan.objects(
id=self.run_plan.id,
backends__backend__nin=[
backend_conf.backend]).update_one(
push__backends=backend_conf)
log.info(_("Starting {name} on backend {backend}").format(
name=self.name, backend=backend_conf.backend.name))
ApplicationFlag.objects(
pending_backends__ne=backend_conf.backend,
application=self,
name=IsStartingFlag.name).update_one(
add_to_set__pending_backends=backend_conf.backend,
upsert=True)
if updated_backends:
ApplicationFlag.objects(
application=self, name=NeedsRestartFlag.name).update_one(
set__pending_backends=updated_backends, upsert=True)
for backend in current_backends:
if backend not in [bc.backend for bc in new_backends]:
log.info(_("Stopping {name} on old backend "
"{backend}").format(name=self.name,
backend=backend.name))
ApplicationFlag.objects(
pending_backends__ne=backend,
application=self,
name=NeedsStoppingFlag.name).update_one(
add_to_set__pending_backends=backend, upsert=True)
def trim_package_files(self):
"""
Removes over-limit package files from the database. The number of packages
per app kept in the database for the rollback feature is set in the user
limits as 'packages_per_app'.
"""
storage = load_handler(self.upaas_config.storage.handler,
self.upaas_config.storage.settings)
if not storage:
log.error("Storage handler '%s' not found, cannot trim "
"packages" % self.upaas_config.storage.handler)
return
removed = 0
for pkg in Package.objects(application=self, filename__exists=True)[
self.owner.limits['packages_per_app']:]:
if pkg.id == self.current_package.id:
continue
removed += 1
pkg.delete_package_file(null_filename=True)
if removed:
log.info("Removed %d package file(s) for app %s" % (removed,
self.name))
def remove_unpacked_packages(self, exclude=None, timeout=None):
"""
Remove all but current unpacked packages
"""
if timeout is None:
timeout = self.upaas_config.commands.timelimit
log.info(_("Cleaning packages for {name}").format(name=self.name))
for pkg in self.packages:
if exclude and pkg.id in exclude:
# skip current package!
continue
if os.path.isdir(pkg.package_path):
log.info(_("Removing package directory {path}").format(
path=pkg.package_path))
# if there are running pids inside the package dir we will need to wait;
# this should only happen during an upgrade, when we need to wait for the
# app to reload into the new package dir
started_at = datetime.datetime.now()
timeout_at = datetime.datetime.now() + datetime.timedelta(
seconds=timeout)
pids = processes.directory_pids(pkg.package_path)
while pids:
if datetime.datetime.now() > timeout_at:
log.error(_("Timeout reached while waiting for pids "
"in {path} to die, killing any remaining "
"processes").format(
path=pkg.package_path))
break
log.info(_("Waiting for {pids} pid(s) in {path} to "
"terminate").format(pids=len(pids),
path=pkg.package_path))
time.sleep(2)
pids = processes.directory_pids(pkg.package_path)
try:
processes.kill_and_remove_dir(pkg.package_path)
except OSError as e:
log.error(_("Exception during package directory cleanup: "
"{e}").format(e=e))
signals.pre_delete.connect(Package.pre_delete, sender=Package)
| gpl-3.0 | 8,420,968,401,472,349,000 | 36.952153 | 79 | 0.579078 | false | 4.466216 | true | false | false |
Fokko/incubator-airflow | airflow/hooks/hive_hooks.py | 1 | 39213 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import os
import re
import socket
import subprocess
import time
from collections import OrderedDict
from tempfile import NamedTemporaryFile
import unicodecsv as csv
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.security import utils
from airflow.utils.file import TemporaryDirectory
from airflow.utils.helpers import as_flattened_list
from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
def get_context_from_env_var():
"""
Extract context from env variable, e.g. dag_id, task_id and execution_date,
so that they can be used inside BashOperator and PythonOperator.
:return: The context of interest.
"""
return {format_map['default']: os.environ.get(format_map['env_var_format'], '')
for format_map in AIRFLOW_VAR_NAME_FORMAT_MAPPING.values()}
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
It also supports the ``beeline``
a lighter CLI that runs JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
Parameters passed here can be overridden by run_cli's hive_conf param
The extra connection parameter ``auth`` gets passed as in the ``jdbc``
connection string as is.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: str
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: str
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: str
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default",
run_as=None,
mapred_queue=None,
mapred_queue_priority=None,
mapred_job_name=None):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.auth = conn.extra_dejson.get('auth', 'noSasl')
self.conn = conn
self.run_as = run_as
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
"Invalid Mapred Queue Priority. Valid values are: "
"{}".format(', '.join(HIVE_QUEUE_PRIORITIES)))
self.mapred_queue = mapred_queue or conf.get('hive',
'default_hive_mapred_queue')
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _get_proxy_user(self):
"""
This function sets the proper proxy_user value in case the user overrides the default.
"""
conn = self.conn
proxy_user_value = conn.extra_dejson.get('proxy_user', "")
if proxy_user_value == "login" and conn.login:
return "hive.server2.proxy.user={0}".format(conn.login)
if proxy_user_value == "owner" and self.run_as:
return "hive.server2.proxy.user={0}".format(self.run_as)
if proxy_user_value != "": # There is a custom proxy user
return "hive.server2.proxy.user={0}".format(proxy_user_value)
return proxy_user_value # The default proxy user (undefined)
def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{host}:{port}/{schema}".format(
host=conn.host, port=conn.port, schema=conn.schema)
if conf.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/[email protected]")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = self._get_proxy_user()
jdbc_url += ";principal={template};{proxy_user}".format(
template=template, proxy_user=proxy_user)
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = '"{}"'.format(jdbc_url)
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
@staticmethod
def _prepare_hiveconf(d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
zip(["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()])
)
def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(schema=schema, hql=hql)
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
hql = hql + '\n'
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
env_context = get_context_from_env_var()
# Only extend the hive_conf if it is defined.
if hive_conf:
env_context.update(hive_conf)
hive_conf_params = self._prepare_hiveconf(env_context)
if self.mapred_queue:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.queuename={}'
.format(self.mapred_queue),
'-hiveconf',
'mapred.job.queue.name={}'
.format(self.mapred_queue),
'-hiveconf',
'tez.queue.name={}'
.format(self.mapred_queue)
])
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.priority={}'
.format(self.mapred_queue_priority)])
if self.mapred_job_name:
hive_conf_params.extend(
['-hiveconf',
'mapred.job.name={}'
.format(self.mapred_job_name)])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
self.log.info("%s", " ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir,
close_fds=True)
self.sp = sp
stdout = ''
while True:
line = sp.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
self.log.info(line.decode('UTF-8').strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql):
"""
Test an hql statement using the hive cli and EXPLAIN
"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ',
'add jar ',
'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
self.log.info("Testing HQL [%s (...)]", query_preview)
if query_set == insert:
query = other + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
self.log.info(message)
error_loc = re.search(r'(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
lst = int(error_loc.group(1))
begin = max(lst - 2, 0)
end = min(lst + 3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
self.log.info("Context :\n %s", context)
else:
self.log.info("SUCCESS")
def load_df(
self,
df,
table,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param df: DataFrame to load into a Hive table
:type df: pandas.DataFrame
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param field_dict: mapping from column name to hive data type.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param delimiter: field delimiter in the file
:type delimiter: str
:param encoding: str encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'M': 'TIMESTAMP', # datetime
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
d = OrderedDict()
for col, dtype in df.dtypes.iteritems():
d[col] = DTYPE_KIND_HIVE_TYPE[dtype.kind]
return d
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, mode="w") as f:
if field_dict is None:
field_dict = _infer_field_types_from_df(df)
df.to_csv(path_or_buf=f,
sep=delimiter,
header=False,
index=False,
encoding=encoding,
date_format="%Y-%m-%d %H:%M:%S",
**pandas_kwargs)
f.flush()
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs)
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False,
tblproperties=None):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:type filepath: str
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param delimiter: field delimiter in the file
:type delimiter: str
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param create: whether to create the table if it doesn't exist
:type create: bool
:param overwrite: whether to overwrite the data in table or partition
:type overwrite: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n".format(table=table)
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(
['`{k}` {v}'.format(k=k.strip('`'), v=v) for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n".format(
table=table, fields=fields)
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n".format(pfields=pfields)
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n".format(delimiter=delimiter)
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(
["'{0}'='{1}'".format(k, v) for k, v in tblproperties.items()])
hql += "TBLPROPERTIES({tprops})\n".format(tprops=tprops)
hql += ";"
self.log.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' ".format(filepath=filepath)
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} ".format(table=table)
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals})".format(pvals=pvals)
# As a workaround for HIVE-10541, add a newline character
# at the end of hql (AIRFLOW-2412).
hql += ';\n'
self.log.info(hql)
self.run_cli(hql)
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.terminate()
time.sleep(60)
self.sp.kill()
class HiveMetastoreHook(BaseHook):
""" Wrapper to interact with the Hive Metastore"""
# java short max val
MAX_PART_COUNT = 32767
def __init__(self, metastore_conn_id='metastore_default'):
self.conn_id = metastore_conn_id
self.metastore = self.get_metastore_client()
def __getstate__(self):
# This is for pickling to work despite the thrift hive client not
# being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
import hmsclient
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
ms = self._find_valid_server()
if ms is None:
raise AirflowException("Failed to locate the valid server.")
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if conf.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
conn_socket = TSocket.TSocket(ms.host, ms.port)
if conf.get('core', 'security') == 'kerberos' \
and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", conn_socket)
else:
transport = TTransport.TBufferedTransport(conn_socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return hmsclient.HMSClient(iprot=protocol)
def _find_valid_server(self):
conns = self.get_connections(self.conn_id)
for conn in conns:
host_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.log.info("Trying to connect to %s:%s", conn.host, conn.port)
if host_socket.connect_ex((conn.host, conn.port)) == 0:
self.log.info("Connected to %s:%s", conn.host, conn.port)
host_socket.close()
return conn
else:
self.log.info("Could not connect to %s:%s", conn.host, conn.port)
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
:type table: str
:param partition: Expression that matches the partitions to check for
(eg `a = 'b' AND c = 'd'`)
:type partition: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
with self.metastore as client:
partitions = client.get_partitions_by_filter(
schema, table, partition, 1)
if partitions:
return True
else:
return False
def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
:type table: str
:param partition_name: Name of the partition to check for (eg `a=b/c=d`)
:type partition_name: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
with self.metastore as client:
return client.check_for_named_partition(schema, table, partition_name)
def get_table(self, table_name, db='default'):
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
with self.metastore as client:
return client.get_table(dbname=db, tbl_name=table_name)
def get_tables(self, db, pattern='*'):
"""
Get a metastore table object
"""
with self.metastore as client:
tables = client.get_tables(db_name=db, pattern=pattern)
return client.get_table_objects_by_name(db, tables)
def get_databases(self, pattern='*'):
"""
Get a metastore table object
"""
with self.metastore as client:
return client.get_databases(pattern)
def get_partitions(
self, schema, table_name, filter=None):
"""
Returns a list of all partitions in a table. Works only
for tables with less than 32767 (java short max val).
For subpartitioned table, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = client.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=HiveMetastoreHook.MAX_PART_COUNT)
else:
parts = client.get_partitions(
db_name=schema, tbl_name=table_name,
max_parts=HiveMetastoreHook.MAX_PART_COUNT)
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
@staticmethod
def _get_max_partition_from_part_specs(part_specs, partition_key, filter_map):
"""
Helper method to get max partition of partitions with partition_key
from part specs. key:value pair in filter_map will be used to
filter out partitions.
:param part_specs: list of partition specs.
:type part_specs: list
:param partition_key: partition key name.
:type partition_key: str
:param filter_map: partition_key:partition_value map used for partition filtering,
e.g. {'key1': 'value1', 'key2': 'value2'}.
Only partitions matching all partition_key:partition_value
pairs will be considered as candidates of max partition.
:type filter_map: map
:return: Max partition or None if part_specs is empty.
"""
if not part_specs:
return None
# Assuming all specs have the same keys.
if partition_key not in part_specs[0].keys():
raise AirflowException("Provided partition_key {} "
"is not in part_specs.".format(partition_key))
if filter_map:
is_subset = set(filter_map.keys()).issubset(set(part_specs[0].keys()))
if filter_map and not is_subset:
raise AirflowException("Keys in provided filter_map {} "
"are not subset of part_spec keys: {}"
.format(', '.join(filter_map.keys()),
', '.join(part_specs[0].keys())))
candidates = [p_dict[partition_key] for p_dict in part_specs
if filter_map is None or
all(item in p_dict.items() for item in filter_map.items())]
if not candidates:
return None
else:
return max(candidates).encode('utf-8')
def max_partition(self, schema, table_name, field=None, filter_map=None):
"""
Returns the maximum value for all partitions with given field in a table.
If only one partition key exists in the table, the key will be used as field.
filter_map should be a partition_key:partition_value map and will be used to
filter out partitions.
:param schema: schema name.
:type schema: str
:param table_name: table name.
:type table_name: str
:param field: partition key to get max partition from.
:type field: str
:param filter_map: partition_key:partition_value map used for partition filtering.
:type filter_map: map
>>> hh = HiveMetastoreHook()
>>> filter_map = {'ds': '2015-01-01'}
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow',\
... table_name=t, field='ds', filter_map=filter_map)
'2015-01-01'
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
key_name_set = {key.name for key in table.partitionKeys}
if len(table.partitionKeys) == 1:
field = table.partitionKeys[0].name
elif not field:
raise AirflowException("Please specify the field you want the max "
"value for.")
elif field not in key_name_set:
raise AirflowException("Provided field is not a partition key.")
if filter_map and not set(filter_map.keys()).issubset(key_name_set):
raise AirflowException("Provided filter_map contains keys "
"that are not partition key.")
part_names = \
client.get_partition_names(schema,
table_name,
max_parts=HiveMetastoreHook.MAX_PART_COUNT)
part_specs = [client.partition_name_to_spec(part_name)
for part_name in part_names]
return HiveMetastoreHook._get_max_partition_from_part_specs(part_specs,
field,
filter_map)
def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
self.get_table(table_name, db)
return True
except Exception:
return False
class HiveServer2Hook(BaseHook):
"""
Wrapper around the pyhive library
Notes:
* the default authMechanism is PLAIN, to override it you
can specify it in the ``extra`` of your connection in the UI
* the default for run_set_variable_statements is true, if you
are using impala you may need to set it to false in the
``extra`` of your connection in the UI
"""
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self, schema=None):
"""
Returns a Hive connection object.
"""
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'NONE')
if auth_mechanism == 'NONE' and db.login is None:
# we need to give a username
username = 'airflow'
kerberos_service_name = None
if conf.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'KERBEROS')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
# pyhive uses GSSAPI instead of KERBEROS as an auth_mechanism identifier
if auth_mechanism == 'GSSAPI':
self.log.warning(
"Detected deprecated 'GSSAPI' for authMechanism "
"for %s. Please use 'KERBEROS' instead",
self.hiveserver2_conn_id
)
auth_mechanism = 'KERBEROS'
from pyhive.hive import connect
return connect(
host=db.host,
port=db.port,
auth=auth_mechanism,
kerberos_service_name=kerberos_service_name,
username=db.login or username,
password=db.password,
database=schema or db.schema or 'default')
def _get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
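# Generator: executes each statement on a single cursor, yields the cursor
# description of result-producing statements and then streams their rows.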
from pyhive.exc import ProgrammingError
if isinstance(hql, str):
hql = [hql]
previous_description = None
with contextlib.closing(self.get_conn(schema)) as conn, \
contextlib.closing(conn.cursor()) as cur:
cur.arraysize = fetch_size or 1000
# not all query services (e.g. impala AIRFLOW-4434) support the set command
db = self.get_connection(self.hiveserver2_conn_id)
if db.extra_dejson.get('run_set_variable_statements', True):
env_context = get_context_from_env_var()
if hive_conf:
env_context.update(hive_conf)
for k, v in env_context.items():
cur.execute("set {}={}".format(k, v))
for statement in hql:
cur.execute(statement)
# we only get results of statements that return
lowered_statement = statement.lower().strip()
if (lowered_statement.startswith('select') or
lowered_statement.startswith('with') or
lowered_statement.startswith('show') or
(lowered_statement.startswith('set') and
'=' not in lowered_statement)):
description = [c for c in cur.description]
if previous_description and previous_description != description:
message = '''The statements are producing different descriptions:
Current: {}
Previous: {}'''.format(repr(description),
repr(previous_description))
raise ValueError(message)
elif not previous_description:
previous_description = description
yield description
try:
# DB API 2 raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
yield from cur
except ProgrammingError:
self.log.debug("get_results returned no records")
def get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
"""
Get results of the provided hql in target schema.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param fetch_size: max size of result to fetch.
:type fetch_size: int
:param hive_conf: hive_conf to execute alone with the hql.
:type hive_conf: dict
:return: results of hql execution, dict with data (list of results) and header
:rtype: dict
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
results = {
'data': list(results_iter),
'header': header
}
return results
def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000,
hive_conf=None):
"""
Execute hql in target schema and write results to a csv file.
:param hql: hql to be executed.
:type hql: str or list
:param csv_filepath: filepath of csv to write results into.
:type csv_filepath: str
:param schema: target schema, default to 'default'.
:type schema: str
:param delimiter: delimiter of the csv file, default to ','.
:type delimiter: str
:param lineterminator: lineterminator of the csv file.
:type lineterminator: str
:param output_header: header of the csv file, default to True.
:type output_header: bool
:param fetch_size: number of result rows to write into the csv file, default to 1000.
:type fetch_size: int
:param hive_conf: hive_conf to execute alone with the hql.
:type hive_conf: dict
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
message = None
i = 0
with open(csv_filepath, 'wb') as file:
writer = csv.writer(file,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
try:
if output_header:
self.log.debug('Cursor description is %s', header)
writer.writerow([c[0] for c in header])
for i, row in enumerate(results_iter, 1):
writer.writerow(row)
if i % fetch_size == 0:
self.log.info("Written %s rows so far.", i)
except ValueError as exception:
message = str(exception)
if message:
# need to clean up the file first
os.remove(csv_filepath)
raise ValueError(message)
self.log.info("Done. Loaded a total of %s rows.", i)
def get_records(self, hql, schema='default', hive_conf=None):
"""
Get a set of records from a Hive query.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param hive_conf: hive_conf to execute alone with the hql.
:type hive_conf: dict
:return: result of hive execution
:rtype: list
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema, hive_conf=hive_conf)['data']
def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:return: result of hql execution
:rtype: DataFrame
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
:return: pandas.DataFrame
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df
| apache-2.0 | 80,130,069,523,703,410 | 38.291583 | 93 | 0.543468 | false | 4.272965 | false | false | false |
elego/tkobr-addons | tko_project_task_status/models/project_task.py | 1 | 5572 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import models, api, fields
from datetime import datetime
from dateutil.relativedelta import relativedelta
from odoo.osv import expression
from odoo.tools.safe_eval import safe_eval
import time
from odoo.exceptions import Warning
class ProjectTaskActions(models.Model):
_name = 'project.task.action'
name = fields.Char(string='Name', required=True)
expected_duration = fields.Integer(u'Expected Time', default=1, required=True)
expected_duration_unit = fields.Selection([('d', 'Day'), ('w', 'Week'), ('m', 'Month'), ('y', 'Year')],
default='d', required=True, string=u'Expected Time Unit')
filter_id = fields.Many2one('ir.filters','Filter')
filter_warning_message = fields.Text("Warning Message")
done_server_action_id = fields.Many2one('ir.actions.server', string='Done Server Rule', help=u'This server action will be executed when Actions is set to done')
cancel_server_action_id = fields.Many2one('ir.actions.server', string='Cancel Server Rule', help=u'This server action will be executed when Actions is set to cancel')
class ProjectTaskActionsLine(models.Model):
_name = 'project.task.action.line'
action_id = fields.Many2one('project.task.action', u'Actions')
expected_date = fields.Date(u'Expected Date')
done_date = fields.Date(u'Done Date', readonly=True)
task_id = fields.Many2one('project.task', 'Task')
state = fields.Selection([('i', u'In Progress'), ('d', u'Done'), ('c', u'Cancelled')], default='i', required=True,
string='State')
@api.model
def _eval_context(self):
"""Returns a dictionary to use as evaluation context for
ir.rule domains."""
return {'user': self.env.user, 'time': time}
#Validate action filter
def validate_action_filter(self):
"""
Context must have active_id
:return:
"""
model_name = 'project.task'
eval_context = self._eval_context()
active_id = self.task_id.id
if active_id and model_name:
domain = self.action_id.filter_id.domain
rule = expression.normalize_domain(safe_eval(domain, eval_context))
Query = self.env[model_name].sudo()._where_calc(rule, active_test=False)
from_clause, where_clause, where_clause_params = Query.get_sql()
where_str = where_clause and (" WHERE %s" % where_clause) or ''
query_str = 'SELECT id FROM ' + from_clause + where_str
self._cr.execute(query_str, where_clause_params)
result = self._cr.fetchall()
if active_id in [id[0] for id in result]:
return True
return False
def set_done(self):
if self.action_id.filter_id:
# validate filter here
if not self.validate_action_filter():
raise Warning(self.action_id.filter_warning_message or "Warning message not set")
#set to done and execute server action
self.write({'state': 'd', 'done_date':fields.Date.today()})
if self.action_id.done_server_action_id:
new_context = dict(self.env.context)
if 'active_id' not in new_context.keys():
new_context.update({'active_id': self.task_id.id,'active_model':'project.task'})
recs = self.action_id.done_server_action_id.with_context(new_context)
recs.run()
def set_cancel(self):
self.state = 'c'
if self.action_id.cancel_server_action_id:
self.action_id.cancel_server_action_id.run()
@api.onchange('action_id')
def onchange_action(self):
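# Derive the expected date by adding the action's expected duration, in the
# configured unit, to today's date.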
if self.action_id:
days = weeks = months = years = 0
if self.action_id.expected_duration_unit == 'd':
days = self.action_id.expected_duration
if self.action_id.expected_duration_unit == 'w':
weeks = self.action_id.expected_duration
if self.action_id.expected_duration_unit == 'm':
months = self.action_id.expected_duration
if self.action_id.expected_duration_unit == 'y':
years = self.action_id.expected_duration
self.expected_date = datetime.today() + relativedelta(years=years, months=months, weeks=weeks, days=days)
class ProjectTask(models.Model):
_inherit = 'project.task'
action_line_ids = fields.One2many('project.task.action.line', 'task_id', 'Actions')
| agpl-3.0 | 3,694,331,997,314,282,500 | 43.935484 | 170 | 0.618988 | false | 3.926709 | false | false | false |
v-legoff/accertin | lyntin/ui/message.py | 1 | 1613 | #######################################################################
# This file is part of Lyntin.
# copyright (c) Free Software Foundation 2001, 2002
#
# Lyntin is distributed under the GNU General Public License license. See the
# file LICENSE for distribution details.
# $Id: message.py,v 1.1 2003/08/01 00:14:52 willhelm Exp $
#######################################################################
"""
Holds the ui's Message class. This gets passed around Lyntin and
allows us to scope data going to the ui.
"""
""" The message type constants."""
ERROR = "ERROR: "
USERDATA = "USERDATA: "
MUDDATA = "MUDDATA: "
LTDATA = "LTDATA: "
""" Used for debugging purposes."""
MESSAGETYPES = {ERROR: "ERROR: ",
USERDATA: "USERDATA: ",
MUDDATA: "MUDDATA: ",
LTDATA: "LTDATA: "}
class Message:
"""
Encapsulates a message to be written to the user.
"""
def __init__(self, data, messagetype=LTDATA, ses=None):
"""
Initialize.
@param data: the message string
@type data: string
@param messagetype: the message type (use a constant defined in ui.ui)
@type messagetype: int
@param ses: the session this message belongs to
@type ses: session.Session
"""
self.session = ses
self.data = data
self.type = messagetype
def __repr__(self):
"""
Represents the message (returns session + type + data).
"""
return repr(self.session) + MESSAGETYPES[self.type] + repr(self.data)
def __str__(self):
"""
The string representation of the Message is the data
itself.
"""
return self.data
| gpl-3.0 | -5,694,003,833,159,969,000 | 25.883333 | 78 | 0.578425 | false | 3.751163 | false | false | false |
rbooth200/DiscEvolution | DiscEvolution/driver.py | 1 | 12630 | # driver.py
#
# Author: R. Booth
# Date: 17 - Nov - 2016
#
# Combined model for dust, gas and chemical evolution
################################################################################
from __future__ import print_function
import numpy as np
import os
from .photoevaporation import FixedExternalEvaporation
from .constants import yr
from . import io
class DiscEvolutionDriver(object):
"""Driver class for full evolution model.
Required Arguments:
disc : Disc model to update
Optional Physics update:
dust : Update the dust, i.e. radial drift
gas : Update due to gas effects, i.e. Viscous evolution
diffusion : Separate diffusion update
internal_photo : Remove gas by internal photoevaporation
photoevaporation : Remove gas by external photoevaporation
chemistry : Solver for the chemical evolution
History:
history : Tracks values of key parameters over time
Note: Diffusion is usually handled in the dust dynamics module
Other options:
t0 : Starting time, default = 0, code units
t_out : Previous output times, default = None, years
"""
def __init__(self, disc, gas=None, dust=None, diffusion=None, chemistry=None, ext_photoevaporation=None, int_photoevaporation=None, history=None, t0=0.):
self._disc = disc
self._gas = gas
self._dust = dust
self._diffusion = diffusion
self._chemistry = chemistry
self._external_photo = ext_photoevaporation
self._internal_photo = int_photoevaporation
self._history = history
self._t = t0
self._nstep = 0
def __call__(self, tmax):
"""Evolve the disc for a single timestep
args:
tmax : Time to evolve towards; the time-step taken will not exceed tmax - t
returns:
dt : Time step taken
"""
disc = self._disc
# Compute the maximum time-step
dt = tmax - self.t
if self._gas:
dt = min(dt, self._gas.max_timestep(self._disc))
if self._dust:
v_visc = self._gas.viscous_velocity(disc)
dt = min(dt, self._dust.max_timestep(self._disc, v_visc))
if self._dust._diffuse:
dt = min(dt, self._dust._diffuse.max_timestep(self._disc))
if self._diffusion:
dt = min(dt, self._diffusion.max_timestep(self._disc))
if self._external_photo and hasattr(self._external_photo,"_density"): # If we are using density to calculate mass loss rates, we need to limit the time step based on photoevaporation
(dM_dot, dM_gas) = self._external_photo.optically_thin_weighting(disc)
Dt = dM_gas[(dM_dot>0)] / dM_dot[(dM_dot>0)]
Dt_min = np.min(Dt)
dt = min(dt,Dt_min)
# Determine tracers for dust step
gas_chem, ice_chem = None, None
dust = None
try:
gas_chem = disc.chem.gas.data
ice_chem = disc.chem.ice.data
except AttributeError:
pass
# Do dust evolution
if self._dust:
self._dust(dt, disc,
gas_tracers=gas_chem,
dust_tracers=ice_chem, v_visc=v_visc)
# Determine tracers for gas steps
try:
gas_chem = disc.chem.gas.data
ice_chem = disc.chem.ice.data
except AttributeError:
pass
try:
dust = disc.dust_frac
except AttributeError:
pass
# Do Advection-diffusion update
if self._gas:
self._gas(dt, disc, [dust, gas_chem, ice_chem])
if self._diffusion:
if gas_chem is not None:
gas_chem[:] += dt * self._diffusion(disc, gas_chem)
if ice_chem is not None:
ice_chem[:] += dt * self._diffusion(disc, ice_chem)
if dust is not None:
dust[:] += dt * self._diffusion(disc, dust)
# Do external photoevaporation
if self._external_photo:
self._external_photo(disc, dt)
# Do internal photoevaporation
if self._internal_photo:
self._internal_photo(disc, dt/yr, self._external_photo)
# Pin the values to >= 0 and <=1:
disc.Sigma[:] = np.maximum(disc.Sigma, 0)
try:
disc.dust_frac[:] = np.maximum(disc.dust_frac, 0)
disc.dust_frac[:] /= np.maximum(disc.dust_frac.sum(0), 1.0)
except AttributeError:
pass
try:
disc.chem.gas.data[:] = np.maximum(disc.chem.gas.data, 0)
disc.chem.ice.data[:] = np.maximum(disc.chem.ice.data, 0)
except AttributeError:
pass
# Chemistry
if self._chemistry:
rho = disc.midplane_gas_density
eps = disc.dust_frac.sum(0)
grain_size = disc.grain_size[-1]
T = disc.T
self._chemistry.update(dt, T, rho, eps, disc.chem,
grain_size=grain_size)
# If we have dust, we should update it now the ice fraction has
# changed
disc.update_ices(disc.chem.ice)
# Now we should update the auxiliary properties, do grain growth etc.
disc.update(dt)
self._t += dt
self._nstep += 1
return dt
@property
def disc(self):
return self._disc
@property
def t(self):
return self._t
@property
def num_steps(self):
return self._nstep
@property
def gas(self):
return self._gas
@property
def dust(self):
return self._dust
@property
def diffusion(self):
return self._diffusion
@property
def chemistry(self):
return self._chemistry
@property
def photoevaporation_external(self):
return self._external_photo
@property
def photoevaporation_internal(self):
return self._internal_photo
@property
def history(self):
return self._history
def dump_ASCII(self, filename):
"""Write the current state to a file, including header information"""
# Put together a header containing information about the physics
# included
head = ''
if self._gas:
head += self._gas.ASCII_header() + '\n'
if self._dust:
head += self._dust.ASCII_header() + '\n'
if self._diffusion:
head += self._diffusion.ASCII_header() + '\n'
if self._chemistry:
head += self._chemistry.ASCII_header() + '\n'
if self._external_photo:
head += self._external_photo.ASCII_header() + '\n'
if self._internal_photo:
head += self._internal_photo.ASCII_header() + '\n'
# Write it all to disc
io.dump_ASCII(filename, self._disc, self.t, head)
def dump_hdf5(self, filename):
"""Write the current state in HDF5 format, with header information"""
headers = []
if self._gas: headers.append(self._gas.HDF5_attributes())
if self._dust: headers.append(self._dust.HDF5_attributes())
if self._diffusion: headers.append(self._diffusion.HDF5_attributes())
if self._chemistry: headers.append(self._chemistry.HDF5_attributes())
if self._external_photo: headers.append(self._external_photo.HDF5_attributes())
if self._internal_photo: headers.append(self._internal_photo.HDF5_attributes())
io.dump_hdf5(filename, self._disc, self.t, headers)
if __name__ == "__main__":
from .star import SimpleStar
from .grid import Grid
from .eos import IrradiatedEOS
from .viscous_evolution import ViscousEvolution
from .dust import DustGrowthTwoPop, SingleFluidDrift
from .opacity import Zhu2012, Tazzari2016
from .diffusion import TracerDiffusion
from .chemistry import TimeDepCOChemOberg, SimpleCOAtomAbund
from .constants import Msun, AU
from .disc_utils import mkdir_p
import matplotlib.pyplot as plt
alpha = 1e-3
Mdot = 1e-8
Rd = 100.
#kappa = Zhu2012
kappa = Tazzari2016()
N_cell = 250
R_in = 0.1
R_out = 500.
yr = 2*np.pi
output_dir = 'test_DiscEvo'
output_times = np.arange(0, 4) * 1e6 * yr
plot_times = np.array([0, 1e4, 1e5, 5e5, 1e6, 3e6])*yr
# Setup the initial conditions
Mdot *= (Msun / yr) / AU**2
grid = Grid(R_in, R_out, N_cell, spacing='natural')
star = SimpleStar(M=1, R=2.5, T_eff=4000.)
# Initial guess for Sigma:
R = grid.Rc
Sigma = (Mdot / (0.1 * alpha * R**2 * star.Omega_k(R))) * np.exp(-R/Rd)
# Iterate until constant Mdot
eos = IrradiatedEOS(star, alpha, kappa=kappa)
eos.set_grid(grid)
eos.update(0, Sigma)
for i in range(100):
Sigma = 0.5 * (Sigma + (Mdot / (3 * np.pi * eos.nu)) * np.exp(-R/Rd))
eos.update(0, Sigma)
# Create the disc object
disc = DustGrowthTwoPop(grid, star, eos, 0.01, Sigma=Sigma)
# Setup the chemistry
chemistry = TimeDepCOChemOberg(a=1e-5)
# Setup the dust-to-gas ratio from the chemistry
solar_abund = SimpleCOAtomAbund(N_cell)
solar_abund.set_solar_abundances()
# Iterate ice fractions to get the dust-to-gas ratio:
for i in range(10):
chem = chemistry.equilibrium_chem(disc.T,
disc.midplane_gas_density,
disc.dust_frac.sum(0),
solar_abund)
disc.initialize_dust_density(chem.ice.total_abund)
disc.chem = chem
# Setup the dynamics modules:
gas = ViscousEvolution()
dust = SingleFluidDrift(TracerDiffusion())
evo = DiscEvolutionDriver(disc, gas=gas, dust=dust, chemistry=chemistry)
# Setup the IO controller
IO = io.Event_Controller(save=output_times, plot=plot_times)
# Run the model!
while not IO.finished():
ti = IO.next_event_time()
while evo.t < ti:
dt = evo(ti)
if (evo.num_steps % 1000) == 0:
print('Nstep: {}'.format(evo.num_steps))
print('Time: {} yr'.format(evo.t / yr))
print('dt: {} yr'.format(dt / yr))
if IO.check_event(evo.t, 'save'):
from .disc_utils import mkdir_p
mkdir_p(output_dir)
snap_name = 'disc_{:04d}.dat'.format(IO.event_number('save'))
evo.dump_ASCII(os.path.join(output_dir, snap_name))
snap_name = 'disc_{:04d}.h5'.format(IO.event_number('save'))
evo.dump_hdf5(os.path.join(output_dir, snap_name))
if IO.check_event(evo.t, 'plot'):
err_state = np.seterr(all='warn')
print('Nstep: {}'.format(evo.num_steps))
print('Time: {} yr'.format(evo.t / (2 * np.pi)))
plt.subplot(321)
l, = plt.loglog(grid.Rc, evo.disc.Sigma_G)
plt.loglog(grid.Rc, evo.disc.Sigma_D.sum(0), '--', c=l.get_color())
plt.xlabel('$R$')
plt.ylabel('$\Sigma_\mathrm{G, D}$')
plt.subplot(322)
plt.loglog(grid.Rc, evo.disc.dust_frac.sum(0))
plt.xlabel('$R$')
plt.ylabel('$\epsilon$')
plt.subplot(323)
plt.loglog(grid.Rc, evo.disc.Stokes()[1])
plt.xlabel('$R$')
plt.ylabel('$St$')
plt.subplot(324)
plt.loglog(grid.Rc, evo.disc.grain_size[1])
plt.xlabel('$R$')
plt.ylabel('$a\,[\mathrm{cm}]$')
plt.subplot(325)
gCO = evo.disc.chem.gas.atomic_abundance()
sCO = evo.disc.chem.ice.atomic_abundance()
gCO.data[:] /= solar_abund.data
sCO.data[:] /= solar_abund.data
c = l.get_color()
plt.semilogx(grid.Rc, gCO['C'], '-', c=c, linewidth=1)
plt.semilogx(grid.Rc, gCO['O'], '-', c=c, linewidth=2)
plt.semilogx(grid.Rc, sCO['C'], ':', c=c, linewidth=1)
plt.semilogx(grid.Rc, sCO['O'], ':', c=c, linewidth=2)
plt.xlabel('$R\,[\mathrm{au}}$')
plt.ylabel('$[X]_\mathrm{solar}$')
plt.subplot(326)
plt.semilogx(grid.Rc, gCO['C'] / gCO['O'], '-', c=c)
plt.semilogx(grid.Rc, sCO['C'] / sCO['O'], ':', c=c)
plt.xlabel('$R\,[\mathrm{au}}$')
plt.ylabel('$[C/O]_\mathrm{solar}$')
np.seterr(**err_state)
IO.pop_events(evo.t)
if len(plot_times) > 0:
plt.show()
| gpl-3.0 | -6,679,079,293,049,471,000 | 31.976501 | 190 | 0.553286 | false | 3.419058 | false | false | false |
ppietrasa/grpc | tools/distrib/check_copyright.py | 1 | 5538 | #!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import datetime
import os
import re
import sys
import subprocess
# find our home
ROOT = os.path.abspath(
os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(ROOT)
# parse command line
argp = argparse.ArgumentParser(description='copyright checker')
argp.add_argument('-o', '--output',
default='details',
choices=['list', 'details'])
argp.add_argument('-s', '--skips',
default=0,
action='store_const',
const=1)
argp.add_argument('-a', '--ancient',
default=0,
action='store_const',
const=1)
argp.add_argument('-f', '--fix',
default=False,
action='store_true');
argp.add_argument('--precommit',
default=False,
action='store_true')
args = argp.parse_args()
# open the license text
with open('LICENSE') as f:
LICENSE = f.read().splitlines()
# license format by file extension
# key is the file extension, value is a format string
# that given a line of license text, returns what should
# be in the file
LICENSE_PREFIX = {
'.bat': r'@rem\s*',
'.c': r'\s*(?://|\*)\s*',
'.cc': r'\s*(?://|\*)\s*',
'.h': r'\s*(?://|\*)\s*',
'.m': r'\s*\*\s*',
'.php': r'\s*\*\s*',
'.js': r'\s*\*\s*',
'.py': r'#\s*',
'.pyx': r'#\s*',
'.pxd': r'#\s*',
'.pxi': r'#\s*',
'.rb': r'#\s*',
'.sh': r'#\s*',
'.proto': r'//\s*',
'.cs': r'//\s*',
'.mak': r'#\s*',
'Makefile': r'#\s*',
'Dockerfile': r'#\s*',
'LICENSE': '',
}
_EXEMPT = frozenset((
# Generated protocol compiler output.
'examples/python/helloworld/helloworld_pb2.py',
'examples/python/helloworld/helloworld_pb2_grpc.py',
'examples/python/multiplex/helloworld_pb2.py',
'examples/python/multiplex/helloworld_pb2_grpc.py',
'examples/python/multiplex/route_guide_pb2.py',
'examples/python/multiplex/route_guide_pb2_grpc.py',
'examples/python/route_guide/route_guide_pb2.py',
'examples/python/route_guide/route_guide_pb2_grpc.py',
# An older file originally from outside gRPC.
'src/php/tests/bootstrap.php',
))
RE_YEAR = r'Copyright (?P<first_year>[0-9]+\-)?(?P<last_year>[0-9]+), Google Inc\.'
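# Build one license regex per known file type: every LICENSE line is prefixed
# with that type's comment marker, and the copyright year range may vary.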
RE_LICENSE = dict(
(k, r'\n'.join(
LICENSE_PREFIX[k] +
(RE_YEAR if re.search(RE_YEAR, line) else re.escape(line))
for line in LICENSE))
for k, v in LICENSE_PREFIX.iteritems())
if args.precommit:
FILE_LIST_COMMAND = 'git status -z | grep -Poz \'(?<=^[MARC][MARCD ] )[^\s]+\''
else:
FILE_LIST_COMMAND = 'git ls-tree -r --name-only -r HEAD | grep -v ^third_party/'
def load(name):
with open(name) as f:
return f.read()
def save(name, text):
with open(name, 'w') as f:
f.write(text)
assert(re.search(RE_LICENSE['LICENSE'], load('LICENSE')))
assert(re.search(RE_LICENSE['Makefile'], load('Makefile')))
def log(cond, why, filename):
if not cond: return
if args.output == 'details':
print '%s: %s' % (why, filename)
else:
print filename
# scan files, validate the text
ok = True
filename_list = []
try:
filename_list = subprocess.check_output(FILE_LIST_COMMAND,
shell=True).splitlines()
except subprocess.CalledProcessError:
sys.exit(0)
for filename in filename_list:
if filename in _EXEMPT:
continue
ext = os.path.splitext(filename)[1]
base = os.path.basename(filename)
if ext in RE_LICENSE:
re_license = RE_LICENSE[ext]
elif base in RE_LICENSE:
re_license = RE_LICENSE[base]
else:
log(args.skips, 'skip', filename)
continue
try:
text = load(filename)
except:
continue
m = re.search(re_license, text)
if m:
pass
elif 'DO NOT EDIT' not in text and filename != 'src/boringssl/err_data.c':
log(1, 'copyright missing', filename)
ok = False
sys.exit(0 if ok else 1)
| bsd-3-clause | 5,643,336,900,783,706,000 | 30.465909 | 83 | 0.631636 | false | 3.485211 | false | false | false |
foosel/OctoPrint | src/octoprint/access/users.py | 1 | 40260 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
from flask_login import UserMixin, AnonymousUserMixin
from werkzeug.local import LocalProxy
import hashlib
import io
import os
import yaml
import uuid
import wrapt
import time
import logging
# noinspection PyCompatibility
from builtins import range, bytes
from octoprint.settings import settings as s
from octoprint.util import atomic_write, to_bytes, deprecated, monotonic_time, generate_api_key
from octoprint.util import get_fully_qualified_classname as fqcn
from octoprint.access.permissions import Permissions, OctoPrintPermission
from octoprint.access.groups import GroupChangeListener, Group
from past.builtins import basestring
class UserManager(GroupChangeListener, object):
def __init__(self, group_manager, settings=None):
self._group_manager = group_manager
self._group_manager.register_listener(self)
self._logger = logging.getLogger(__name__)
self._session_users_by_session = dict()
self._sessionids_by_userid = dict()
self._enabled = True
if settings is None:
settings = s()
self._settings = settings
self._login_status_listeners = []
def register_login_status_listener(self, listener):
self._login_status_listeners.append(listener)
def unregister_login_status_listener(self, listener):
self._login_status_listeners.remove(listener)
def anonymous_user_factory(self):
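# When access control is enabled anonymous requests get a guest user; when it
# is disabled they are treated as an admin user.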
if self.enabled:
return AnonymousUser([self._group_manager.guest_group])
else:
return AdminUser([self._group_manager.admin_group, self._group_manager.user_group])
def api_user_factory(self):
return ApiUser([self._group_manager.admin_group, self._group_manager.user_group])
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, value):
self._enabled = value
def enable(self):
self._enabled = True
def disable(self):
self._enabled = False
def login_user(self, user):
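# Wrap the user in a SessionUser, register the session and notify all
# registered login status listeners.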
self._cleanup_sessions()
if user is None or user.is_anonymous:
return
if isinstance(user, LocalProxy):
# noinspection PyProtectedMember
user = user._get_current_object()
if not isinstance(user, User):
return None
if not isinstance(user, SessionUser):
user = SessionUser(user)
self._session_users_by_session[user.session] = user
userid = user.get_id()
if not userid in self._sessionids_by_userid:
self._sessionids_by_userid[userid] = set()
self._sessionids_by_userid[userid].add(user.session)
for listener in self._login_status_listeners:
try:
listener.on_user_logged_in(user)
except Exception:
self._logger.exception("Error in on_user_logged_in on {!r}".format(listener),
extra=dict(callback=fqcn(listener)))
self._logger.info("Logged in user: {}".format(user.get_id()))
return user
def logout_user(self, user, stale=False):
if user is None or user.is_anonymous or isinstance(user, AdminUser):
return
if isinstance(user, LocalProxy):
user = user._get_current_object()
if not isinstance(user, SessionUser):
return
userid = user.get_id()
sessionid = user.session
if userid in self._sessionids_by_userid:
try:
self._sessionids_by_userid[userid].remove(sessionid)
except KeyError:
pass
if sessionid in self._session_users_by_session:
try:
del self._session_users_by_session[sessionid]
except KeyError:
pass
for listener in self._login_status_listeners:
try:
listener.on_user_logged_out(user, stale=stale)
except Exception:
self._logger.exception("Error in on_user_logged_out on {!r}".format(listener),
extra=dict(callback=fqcn(listener)))
self._logger.info("Logged out user: {}".format(user.get_id()))
def _cleanup_sessions(self):
for session, user in list(self._session_users_by_session.items()):
if not isinstance(user, SessionUser):
continue
if user.created + (24 * 60 * 60) < monotonic_time():
self._logger.info("Cleaning up user session {} for user {}".format(session, user.get_id()))
self.logout_user(user, stale=True)
@staticmethod
def create_password_hash(password, salt=None, settings=None):
if not salt:
if settings is None:
settings = s()
salt = settings.get(["accessControl", "salt"])
if salt is None:
import string
from random import choice
chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
salt = "".join(choice(chars) for _ in range(32))
settings.set(["accessControl", "salt"], salt)
settings.save()
return hashlib.sha512(to_bytes(password, encoding="utf-8", errors="replace") + to_bytes(salt)).hexdigest()
def check_password(self, username, password):
user = self.find_user(username)
if not user:
return False
hash = UserManager.create_password_hash(password,
settings=self._settings)
if user.check_password(hash):
# new hash matches, correct password
return True
else:
# new hash doesn't match, but maybe the old one does, so check that!
oldHash = UserManager.create_password_hash(password,
salt="mvBUTvwzBzD3yPwvnJ4E4tXNf3CGJvvW",
settings=self._settings)
if user.check_password(oldHash):
# old hash matches, we migrate the stored password hash to the new one and return True since it's the correct password
self.change_user_password(username, password)
return True
else:
# old hash doesn't match either, wrong password
return False
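	# A small sketch of the intended call flow (user name and passwords are
	# made up): a correct password stored under the legacy fixed salt is
	# accepted and transparently re-hashed via change_user_password, while a
	# wrong password simply yields False:
	#
	#     user_manager.check_password("alice", "correct-password")  # True (hash migrated if needed)
	#     user_manager.check_password("alice", "wrong-password")    # False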
def add_user(self, username, password, active, permissions, groups, overwrite=False):
pass
def change_user_activation(self, username, active):
pass
def change_user_permissions(self, username, permissions):
pass
def add_permissions_to_user(self, username, permissions):
pass
def remove_permissions_from_user(self, username, permissions):
pass
def change_user_groups(self, username, groups):
pass
def add_groups_to_user(self, username, groups):
pass
def remove_groups_from_user(self, username, groups):
pass
def remove_groups_from_users(self, group):
pass
def change_user_password(self, username, password):
pass
def get_user_setting(self, username, key):
return None
def get_all_user_settings(self, username):
return dict()
def change_user_setting(self, username, key, value):
pass
def change_user_settings(self, username, new_settings):
pass
def remove_user(self, username):
if username in self._sessionids_by_userid:
sessions = self._sessionids_by_userid[username]
for session in sessions:
if session in self._session_users_by_session:
del self._session_users_by_session[session]
del self._sessionids_by_userid[username]
def find_user(self, userid=None, session=None):
if session is not None and session in self._session_users_by_session:
user = self._session_users_by_session[session]
if userid is None or userid == user.get_id():
return user
return None
def find_sessions_for(self, matcher):
result = []
for user in self.get_all_users():
if matcher(user):
try:
session_ids = self._sessionids_by_userid[user.get_id()]
for session_id in session_ids:
try:
result.append(self._session_users_by_session[session_id])
except KeyError:
# unknown session after all
continue
except KeyError:
# no session for user
pass
return result
def get_all_users(self):
return []
def has_been_customized(self):
return False
def on_group_removed(self, group):
self._logger.debug("Group {} got removed, removing from all users".format(group.key))
self.remove_groups_from_users([group])
def on_group_permissions_changed(self, group, added=None, removed=None):
users = self.find_sessions_for(lambda u: group in u.groups)
for listener in self._login_status_listeners:
try:
for user in users:
listener.on_user_modified(user)
except Exception:
self._logger.exception("Error in on_user_modified on {!r}".format(listener),
extra=dict(callback=fqcn(listener)))
def on_group_subgroups_changed(self, group, added=None, removed=None):
users = self.find_sessions_for(lambda u: group in u.groups)
for listener in self._login_status_listeners:
# noinspection PyBroadException
try:
for user in users:
listener.on_user_modified(user)
except Exception:
self._logger.exception("Error in on_user_modified on {!r}".format(listener),
extra=dict(callback=fqcn(listener)))
def _trigger_on_user_modified(self, user):
if isinstance(user, basestring):
# user id
users = []
try:
session_ids = self._sessionids_by_userid[user]
for session_id in session_ids:
try:
users.append(self._session_users_by_session[session_id])
except KeyError:
# unknown session id
continue
except KeyError:
# no session for user
return
elif isinstance(user, User) and not isinstance(user, SessionUser):
users = self.find_sessions_for(lambda u: u.get_id() == user.get_id())
elif isinstance(user, User):
users = [user]
else:
return
for listener in self._login_status_listeners:
try:
for user in users:
listener.on_user_modified(user)
except Exception:
self._logger.exception("Error in on_user_modified on {!r}".format(listener),
extra=dict(callback=fqcn(listener)))
def _trigger_on_user_removed(self, username):
for listener in self._login_status_listeners:
try:
listener.on_user_removed(username)
except Exception:
self._logger.exception("Error in on_user_removed on {!r}".format(listener),
extra=dict(callback=fqcn(listener)))
#~~ Deprecated methods follow
# TODO: Remove deprecated methods in OctoPrint 1.5.0
@staticmethod
def createPasswordHash(*args, **kwargs):
"""
.. deprecated: 1.4.0
Replaced by :func:`~UserManager.create_password_hash`
"""
# we can't use the deprecated decorator here since this method is static
import warnings
warnings.warn("createPasswordHash has been renamed to create_password_hash", DeprecationWarning, stacklevel=2)
return UserManager.create_password_hash(*args, **kwargs)
@deprecated("changeUserRoles has been replaced by change_user_permissions",
includedoc="Replaced by :func:`change_user_permissions`",
since="1.4.0")
def changeUserRoles(self, username, roles):
user = self.find_user(username)
if user is None:
raise UnknownUser(username)
removed_roles = set(user._roles) - set(roles)
self.removeRolesFromUser(username, removed_roles, user=user)
added_roles = set(roles) - set(user._roles)
self.addRolesToUser(username, added_roles, user=user)
@deprecated("addRolesToUser has been replaced by add_permissions_to_user",
includedoc="Replaced by :func:`add_permissions_to_user`",
since="1.4.0")
def addRolesToUser(self, username, roles, user=None):
if user is None:
user = self.find_user(username)
if user is None:
raise UnknownUser(username)
if "admin" in roles:
self.add_groups_to_user(username, self._group_manager.admin_group)
if "user" in roles:
			self.add_groups_to_user(username, self._group_manager.user_group)
@deprecated("removeRolesFromUser has been replaced by remove_permissions_from_user",
includedoc="Replaced by :func:`remove_permissions_from_user`",
since="1.4.0")
def removeRolesFromUser(self, username, roles, user=None):
if user is None:
user = self.find_user(username)
if user is None:
raise UnknownUser(username)
if "admin" in roles:
self.remove_groups_from_user(username, self._group_manager.admin_group)
self.remove_permissions_from_user(username, Permissions.ADMIN)
if "user" in roles:
self.remove_groups_from_user(username, self._group_manager.user_group)
checkPassword = deprecated("checkPassword has been renamed to check_password",
includedoc="Replaced by :func:`check_password`",
since="1.4.0")(check_password)
addUser = deprecated("addUser has been renamed to add_user",
includedoc="Replaced by :func:`add_user`",
since="1.4.0")(add_user)
changeUserActivation = deprecated("changeUserActivation has been renamed to change_user_activation",
includedoc="Replaced by :func:`change_user_activation`",
since="1.4.0")(change_user_activation)
changeUserPassword = deprecated("changeUserPassword has been renamed to change_user_password",
includedoc="Replaced by :func:`change_user_password`",
since="1.4.0")(change_user_password)
getUserSetting = deprecated("getUserSetting has been renamed to get_user_setting",
includedoc="Replaced by :func:`get_user_setting`",
since="1.4.0")(get_user_setting)
getAllUserSettings = deprecated("getAllUserSettings has been renamed to get_all_user_settings",
includedoc="Replaced by :func:`get_all_user_settings`",
since="1.4.0")(get_all_user_settings)
changeUserSetting = deprecated("changeUserSetting has been renamed to change_user_setting",
includedoc="Replaced by :func:`change_user_setting`",
since="1.4.0")(change_user_setting)
changeUserSettings = deprecated("changeUserSettings has been renamed to change_user_settings",
includedoc="Replaced by :func:`change_user_settings`",
since="1.4.0")(change_user_settings)
removeUser = deprecated("removeUser has been renamed to remove_user",
includedoc="Replaced by :func:`remove_user`",
since="1.4.0")(remove_user)
findUser = deprecated("findUser has been renamed to find_user",
includedoc="Replaced by :func:`find_user`",
since="1.4.0")(find_user)
getAllUsers = deprecated("getAllUsers has been renamed to get_all_users",
includedoc="Replaced by :func:`get_all_users`",
since="1.4.0")(get_all_users)
hasBeenCustomized = deprecated("hasBeenCustomized has been renamed to has_been_customized",
includedoc="Replaced by :func:`has_been_customized`",
since="1.4.0")(has_been_customized)
class LoginStatusListener(object):
def on_user_logged_in(self, user):
pass
def on_user_logged_out(self, user, stale=False):
pass
def on_user_modified(self, user):
pass
def on_user_removed(self, userid):
pass
##~~ FilebasedUserManager, takes available users from users.yaml file
class FilebasedUserManager(UserManager):
def __init__(self, group_manager, path=None, settings=None):
UserManager.__init__(self, group_manager, settings=settings)
if path is None:
path = self._settings.get(["accessControl", "userfile"])
if path is None:
path = os.path.join(s().getBaseFolder("base"), "users.yaml")
self._userfile = path
self._users = {}
self._dirty = False
self._customized = None
self._load()
def _load(self):
if os.path.exists(self._userfile) and os.path.isfile(self._userfile):
self._customized = True
with io.open(self._userfile, 'rt', encoding='utf-8') as f:
data = yaml.safe_load(f)
for name, attributes in data.items():
permissions = []
if "permissions" in attributes:
permissions = attributes["permissions"]
groups = {self._group_manager.user_group} # the user group is mandatory for all logged in users
if "groups" in attributes:
groups |= set(attributes["groups"])
# migrate from roles to permissions
if "roles" in attributes and not "permissions" in attributes:
self._logger.info("Migrating user {} to new granular permission system".format(name))
groups |= set(self._migrate_roles_to_groups(attributes["roles"]))
self._dirty = True
apikey = None
if "apikey" in attributes:
apikey = attributes["apikey"]
settings = dict()
if "settings" in attributes:
settings = attributes["settings"]
self._users[name] = User(username=name,
passwordHash=attributes["password"],
active=attributes["active"],
permissions=self._to_permissions(*permissions),
groups=self._to_groups(*groups),
apikey=apikey,
settings=settings)
for sessionid in self._sessionids_by_userid.get(name, set()):
if sessionid in self._session_users_by_session:
self._session_users_by_session[sessionid].update_user(self._users[name])
if self._dirty:
self._save()
else:
self._customized = False
def _save(self, force=False):
if not self._dirty and not force:
return
data = {}
for name, user in self._users.items():
if not user or not isinstance(user, User):
continue
data[name] = {
"password": user._passwordHash,
"active": user._active,
"groups": self._from_groups(*user._groups),
"permissions": self._from_permissions(*user._permissions),
"apikey": user._apikey,
"settings": user._settings,
# TODO: deprecated, remove in 1.5.0
"roles": user._roles
}
with atomic_write(self._userfile, mode='wt', permissions=0o600, max_permissions=0o666) as f:
yaml.safe_dump(data, f, default_flow_style=False, indent=4, allow_unicode=True)
self._dirty = False
self._load()
def _migrate_roles_to_groups(self, roles):
# If admin is inside the roles, just return admin group
if "admin" in roles:
return [self._group_manager.admin_group, self._group_manager.user_group]
else:
return [self._group_manager.user_group]
def _refresh_groups(self, user):
user._groups = self._to_groups(*map(lambda g: g.key, user.groups))
def add_user(self, username, password, active=False, permissions=None, groups=None, apikey=None, overwrite=False):
if not permissions:
permissions = []
permissions = self._to_permissions(*permissions)
if not groups:
groups = self._group_manager.default_groups
groups = self._to_groups(*groups)
if username in self._users and not overwrite:
raise UserAlreadyExists(username)
self._users[username] = User(username,
UserManager.create_password_hash(password,
settings=self._settings),
active,
permissions,
groups,
apikey=apikey)
self._dirty = True
self._save()
def change_user_activation(self, username, active):
if username not in self._users:
raise UnknownUser(username)
if self._users[username].is_active != active:
self._users[username]._active = active
self._dirty = True
self._save()
self._trigger_on_user_modified(username)
def change_user_permissions(self, username, permissions):
if username not in self._users:
raise UnknownUser(username)
user = self._users[username]
permissions = self._to_permissions(*permissions)
removed_permissions = list(set(user._permissions) - set(permissions))
added_permissions = list(set(permissions) - set(user._permissions))
if len(removed_permissions) > 0:
user.remove_permissions_from_user(removed_permissions)
self._dirty = True
if len(added_permissions) > 0:
user.add_permissions_to_user(added_permissions)
self._dirty = True
if self._dirty:
self._save()
self._trigger_on_user_modified(username)
def add_permissions_to_user(self, username, permissions):
if username not in self._users:
raise UnknownUser(username)
if self._users[username].add_permissions_to_user(self._to_permissions(*permissions)):
self._dirty = True
self._save()
self._trigger_on_user_modified(username)
def remove_permissions_from_user(self, username, permissions):
if username not in self._users:
raise UnknownUser(username)
if self._users[username].remove_permissions_from_user(self._to_permissions(*permissions)):
self._dirty = True
self._save()
self._trigger_on_user_modified(username)
def remove_permissions_from_users(self, permissions):
modified = []
		for username, user in self._users.items():
			dirty = user.remove_permissions_from_user(self._to_permissions(*permissions))
			if dirty:
				self._dirty = True
				modified.append(username)
if self._dirty:
self._save()
for username in modified:
self._trigger_on_user_modified(username)
def change_user_groups(self, username, groups):
if username not in self._users:
raise UnknownUser(username)
user = self._users[username]
groups = self._to_groups(*groups)
removed_groups = list(set(user._groups) - set(groups))
added_groups = list(set(groups) - set(user._groups))
if len(removed_groups):
self._dirty |= user.remove_groups_from_user(removed_groups)
if len(added_groups):
self._dirty |= user.add_groups_to_user(added_groups)
if self._dirty:
self._save()
self._trigger_on_user_modified(username)
def add_groups_to_user(self, username, groups, save=True, notify=True):
if username not in self._users:
raise UnknownUser(username)
if self._users[username].add_groups_to_user(self._to_groups(*groups)):
self._dirty = True
if save:
self._save()
if notify:
self._trigger_on_user_modified(username)
def remove_groups_from_user(self, username, groups, save=True, notify=True):
if username not in self._users:
raise UnknownUser(username)
if self._users[username].remove_groups_from_user(self._to_groups(*groups)):
self._dirty = True
if save:
self._save()
if notify:
self._trigger_on_user_modified(username)
def remove_groups_from_users(self, groups):
modified = []
for username, user in self._users.items():
dirty = user.remove_groups_from_user(self._to_groups(*groups))
if dirty:
self._dirty = True
modified.append(username)
if self._dirty:
self._save()
for username in modified:
self._trigger_on_user_modified(username)
def change_user_password(self, username, password):
if not username in self._users:
raise UnknownUser(username)
passwordHash = UserManager.create_password_hash(password,
settings=self._settings)
user = self._users[username]
if user._passwordHash != passwordHash:
user._passwordHash = passwordHash
self._dirty = True
self._save()
def change_user_setting(self, username, key, value):
if not username in self._users:
raise UnknownUser(username)
user = self._users[username]
old_value = user.get_setting(key)
if not old_value or old_value != value:
user.set_setting(key, value)
self._dirty = self._dirty or old_value != value
self._save()
def change_user_settings(self, username, new_settings):
if not username in self._users:
raise UnknownUser(username)
user = self._users[username]
for key, value in new_settings.items():
old_value = user.get_setting(key)
user.set_setting(key, value)
self._dirty = self._dirty or old_value != value
self._save()
def get_all_user_settings(self, username):
if not username in self._users:
raise UnknownUser(username)
user = self._users[username]
return user.get_all_settings()
def get_user_setting(self, username, key):
if not username in self._users:
raise UnknownUser(username)
user = self._users[username]
return user.get_setting(key)
def generate_api_key(self, username):
if not username in self._users:
raise UnknownUser(username)
user = self._users[username]
user._apikey = generate_api_key()
self._dirty = True
self._save()
return user._apikey
def delete_api_key(self, username):
if not username in self._users:
raise UnknownUser(username)
user = self._users[username]
user._apikey = None
self._dirty = True
self._save()
def remove_user(self, username):
UserManager.remove_user(self, username)
if not username in self._users:
raise UnknownUser(username)
del self._users[username]
self._dirty = True
self._save()
def find_user(self, userid=None, apikey=None, session=None):
user = UserManager.find_user(self, userid=userid, session=session)
if user is not None:
return user
if userid is not None:
if userid not in self._users:
return None
return self._users[userid]
elif apikey is not None:
for user in self._users.values():
if apikey == user._apikey:
return user
return None
else:
return None
def get_all_users(self):
return list(self._users.values())
def has_been_customized(self):
return self._customized
def on_group_permissions_changed(self, group, added=None, removed=None):
# refresh our group references
for user in self.get_all_users():
if group in user.groups:
self._refresh_groups(user)
# call parent
UserManager.on_group_permissions_changed(self, group, added=added, removed=removed)
def on_group_subgroups_changed(self, group, added=None, removed=None):
# refresh our group references
for user in self.get_all_users():
if group in user.groups:
self._refresh_groups(user)
# call parent
UserManager.on_group_subgroups_changed(self, group, added=added, removed=removed)
#~~ Helpers
def _to_groups(self, *groups):
return list(set(filter(lambda x: x is not None,
(self._group_manager._to_group(group) for group in groups))))
def _to_permissions(self, *permissions):
return list(set(filter(lambda x: x is not None,
(Permissions.find(permission) for permission in permissions))))
def _from_groups(self, *groups):
return list(set(group.key for group in groups))
def _from_permissions(self, *permissions):
return list(set(permission.key for permission in permissions))
# ~~ Deprecated methods follow
# TODO: Remove deprecated methods in OctoPrint 1.5.0
generateApiKey = deprecated("generateApiKey has been renamed to generate_api_key",
includedoc="Replaced by :func:`generate_api_key`",
since="1.4.0")(generate_api_key)
deleteApiKey = deprecated("deleteApiKey has been renamed to delete_api_key",
includedoc="Replaced by :func:`delete_api_key`",
since="1.4.0")(delete_api_key)
addUser = deprecated("addUser has been renamed to add_user",
includedoc="Replaced by :func:`add_user`",
since="1.4.0")(add_user)
changeUserActivation = deprecated("changeUserActivation has been renamed to change_user_activation",
includedoc="Replaced by :func:`change_user_activation`",
since="1.4.0")(change_user_activation)
changeUserPassword = deprecated("changeUserPassword has been renamed to change_user_password",
includedoc="Replaced by :func:`change_user_password`",
since="1.4.0")(change_user_password)
getUserSetting = deprecated("getUserSetting has been renamed to get_user_setting",
includedoc="Replaced by :func:`get_user_setting`",
since="1.4.0")(get_user_setting)
getAllUserSettings = deprecated("getAllUserSettings has been renamed to get_all_user_settings",
includedoc="Replaced by :func:`get_all_user_settings`",
since="1.4.0")(get_all_user_settings)
changeUserSetting = deprecated("changeUserSetting has been renamed to change_user_setting",
includedoc="Replaced by :func:`change_user_setting`",
since="1.4.0")(change_user_setting)
changeUserSettings = deprecated("changeUserSettings has been renamed to change_user_settings",
includedoc="Replaced by :func:`change_user_settings`",
since="1.4.0")(change_user_settings)
removeUser = deprecated("removeUser has been renamed to remove_user",
includedoc="Replaced by :func:`remove_user`",
since="1.4.0")(remove_user)
findUser = deprecated("findUser has been renamed to find_user",
includedoc="Replaced by :func:`find_user`",
since="1.4.0")(find_user)
getAllUsers = deprecated("getAllUsers has been renamed to get_all_users",
includedoc="Replaced by :func:`get_all_users`",
since="1.4.0")(get_all_users)
hasBeenCustomized = deprecated("hasBeenCustomized has been renamed to has_been_customized",
includedoc="Replaced by :func:`has_been_customized`",
since="1.4.0")(has_been_customized)
##~~ Exceptions
class UserAlreadyExists(Exception):
def __init__(self, username):
Exception.__init__(self, "User %s already exists" % username)
class UnknownUser(Exception):
def __init__(self, username):
Exception.__init__(self, "Unknown user: %s" % username)
class UnknownRole(Exception):
	def __init__(self, role):
Exception.__init__(self, "Unknown role: %s" % role)
##~~ Refactoring helpers
class MethodReplacedByBooleanProperty(object):
def __init__(self, name, message, getter):
self._name = name
self._message = message
self._getter = getter
@property
def _attr(self):
return self._getter()
def __call__(self):
from warnings import warn
warn(DeprecationWarning(self._message.format(name=self._name)), stacklevel=2)
return self._attr
def __eq__(self, other):
return self._attr == other
def __ne__(self, other):
return self._attr != other
def __bool__(self):
# Python 3
return self._attr
def __nonzero__(self):
# Python 2
return self._attr
def __hash__(self):
return hash(self._attr)
def __repr__(self):
return "MethodReplacedByProperty({}, {}, {})".format(self._name, self._message, self._getter)
def __str__(self):
return str(self._attr)
# TODO: Remove compatibility layer in OctoPrint 1.5.0
class FlaskLoginMethodReplacedByBooleanProperty(MethodReplacedByBooleanProperty):
def __init__(self, name, getter):
message = "{name} is now a property in Flask-Login versions >= 0.3.0, which OctoPrint now uses. " + \
"Use {name} instead of {name}(). This compatibility layer will be removed in OctoPrint 1.5.0."
MethodReplacedByBooleanProperty.__init__(self, name, message, getter)
# TODO: Remove compatibility layer in OctoPrint 1.5.0
class OctoPrintUserMethodReplacedByBooleanProperty(MethodReplacedByBooleanProperty):
def __init__(self, name, getter):
message = "{name} is now a property for consistency reasons with Flask-Login versions >= 0.3.0, which " + \
"OctoPrint now uses. Use {name} instead of {name}(). This compatibility layer will be removed " + \
"in OctoPrint 1.5.0."
MethodReplacedByBooleanProperty.__init__(self, name, message, getter)
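# A small sketch of what the compatibility layer above allows (with a
# hypothetical `user` object): both the Flask-Login >= 0.3.0 property style
# and the legacy call style evaluate to the same boolean, the latter emitting
# a DeprecationWarning:
#
#     if user.is_active:        # property-style access
#         ...
#     if user.is_active():      # legacy call style, supported until 1.5.0
#         ...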
##~~ User object
class User(UserMixin):
def __init__(self, username, passwordHash, active, permissions=None, groups=None, apikey=None, settings=None):
if permissions is None:
permissions = []
if groups is None:
groups = []
self._username = username
self._passwordHash = passwordHash
self._active = active
self._permissions = permissions
self._groups = groups
self._apikey = apikey
if settings is None:
settings = dict()
self._settings = settings
def as_dict(self):
from octoprint.access.permissions import OctoPrintPermission
return {
"name": self._username,
"active": bool(self.is_active),
"permissions": list(map(lambda p: p.key, self._permissions)),
"groups": list(map(lambda g: g.key, self._groups)),
"needs": OctoPrintPermission.convert_needs_to_dict(self.needs),
"apikey": self._apikey,
"settings": self._settings,
# TODO: deprecated, remove in 1.5.0
"admin": self.has_permission(Permissions.ADMIN),
"user": not self.is_anonymous,
"roles": self._roles
}
def check_password(self, passwordHash):
return self._passwordHash == passwordHash
def get_id(self):
return self.get_name()
def get_name(self):
return self._username
@property
def is_anonymous(self):
return FlaskLoginMethodReplacedByBooleanProperty("is_anonymous", lambda: False)
@property
def is_authenticated(self):
return FlaskLoginMethodReplacedByBooleanProperty("is_authenticated", lambda: True)
@property
def is_active(self):
return FlaskLoginMethodReplacedByBooleanProperty("is_active", lambda: self._active)
def get_all_settings(self):
return self._settings
def get_setting(self, key):
if not isinstance(key, (tuple, list)):
path = [key]
else:
path = key
return self._get_setting(path)
def set_setting(self, key, value):
if not isinstance(key, (tuple, list)):
path = [key]
else:
path = key
return self._set_setting(path, value)
def _get_setting(self, path):
s = self._settings
for p in path:
if isinstance(s, dict) and p in s:
s = s[p]
else:
return None
return s
def _set_setting(self, path, value):
s = self._settings
for p in path[:-1]:
if p not in s:
s[p] = dict()
if not isinstance(s[p], dict):
s[p] = dict()
s = s[p]
key = path[-1]
s[key] = value
return True
def add_permissions_to_user(self, permissions):
# Make sure the permissions variable is of type list
if not isinstance(permissions, list):
permissions = [permissions]
assert(all(map(lambda p: isinstance(p, OctoPrintPermission), permissions)))
dirty = False
for permission in permissions:
			if permission not in self._permissions:
self._permissions.append(permission)
dirty = True
return dirty
def remove_permissions_from_user(self, permissions):
# Make sure the permissions variable is of type list
if not isinstance(permissions, list):
permissions = [permissions]
assert(all(map(lambda p: isinstance(p, OctoPrintPermission), permissions)))
dirty = False
for permission in permissions:
if permission in self._permissions:
self._permissions.remove(permission)
dirty = True
return dirty
def add_groups_to_user(self, groups):
# Make sure the groups variable is of type list
if not isinstance(groups, list):
groups = [groups]
assert(all(map(lambda p: isinstance(p, Group), groups)))
dirty = False
for group in groups:
if group.is_toggleable() and group not in self._groups:
self._groups.append(group)
dirty = True
return dirty
def remove_groups_from_user(self, groups):
# Make sure the groups variable is of type list
if not isinstance(groups, list):
groups = [groups]
assert(all(map(lambda p: isinstance(p, Group), groups)))
dirty = False
for group in groups:
if group.is_toggleable() and group in self._groups:
self._groups.remove(group)
dirty = True
return dirty
@property
def permissions(self):
if self._permissions is None:
return []
if Permissions.ADMIN in self._permissions:
return Permissions.all()
return list(filter(lambda p: p is not None, self._permissions))
@property
def groups(self):
return list(self._groups)
@property
def effective_permissions(self):
if self._permissions is None:
return []
return list(filter(lambda p: p is not None and self.has_permission(p), Permissions.all()))
@property
def needs(self):
needs = set()
for permission in self.permissions:
if permission is not None:
needs = needs.union(permission.needs)
for group in self.groups:
if group is not None:
needs = needs.union(group.needs)
return needs
def has_permission(self, permission):
return self.has_needs(*permission.needs)
def has_needs(self, *needs):
return set(needs).issubset(self.needs)
def __repr__(self):
return "User(id=%s,name=%s,active=%r,user=True,admin=%r,permissions=%s,groups=%s)" % (self.get_id(), self.get_name(), bool(self.is_active), self.has_permission(Permissions.ADMIN), self._permissions, self._groups)
# ~~ Deprecated methods & properties follow
# TODO: Remove deprecated methods & properties in OctoPrint 1.5.0
asDict = deprecated("asDict has been renamed to as_dict",
includedoc="Replaced by :func:`as_dict`",
since="1.4.0")(as_dict)
@property
@deprecated("is_user is deprecated, please use has_permission", since="1.4.0")
def is_user(self):
return OctoPrintUserMethodReplacedByBooleanProperty("is_user", lambda: not self.is_anonymous)
@property
@deprecated("is_admin is deprecated, please use has_permission", since="1.4.0")
def is_admin(self):
return OctoPrintUserMethodReplacedByBooleanProperty("is_admin", lambda: self.has_permission(Permissions.ADMIN))
@property
@deprecated("roles is deprecated, please use has_permission", since="1.4.0")
def roles(self):
return self._roles
@property
def _roles(self):
"""Helper for the deprecated self.roles and serializing to yaml"""
if self.has_permission(Permissions.ADMIN):
return ["user", "admin"]
elif not self.is_anonymous:
return ["user"]
else:
return []
class AnonymousUser(AnonymousUserMixin, User):
def __init__(self, groups):
User.__init__(self, None, "", True, [], groups)
@property
def is_anonymous(self):
return FlaskLoginMethodReplacedByBooleanProperty("is_anonymous", lambda: True)
@property
def is_authenticated(self):
return FlaskLoginMethodReplacedByBooleanProperty("is_authenticated", lambda: False)
@property
def is_active(self):
return FlaskLoginMethodReplacedByBooleanProperty("is_active", lambda: self._active)
def check_password(self, passwordHash):
return True
def as_dict(self):
from octoprint.access.permissions import OctoPrintPermission
return {
"needs": OctoPrintPermission.convert_needs_to_dict(self.needs)
}
def __repr__(self):
return "AnonymousUser(groups=%s)" % self._groups
class SessionUser(wrapt.ObjectProxy):
def __init__(self, user):
wrapt.ObjectProxy.__init__(self, user)
self._self_session = "".join('%02X' % z for z in bytes(uuid.uuid4().bytes))
self._self_created = monotonic_time()
self._self_touched = monotonic_time()
@property
def session(self):
return self._self_session
@property
def created(self):
return self._self_created
@property
def touched(self):
return self._self_touched
def touch(self):
self._self_touched = monotonic_time()
@deprecated("SessionUser.get_session() has been deprecated, use SessionUser.session instead", since="1.3.5")
def get_session(self):
return self.session
def update_user(self, user):
self.__wrapped__ = user
def as_dict(self):
result = self.__wrapped__.as_dict()
result.update(dict(session=self.session))
return result
def __repr__(self):
return "SessionUser({!r},session={},created={})".format(self.__wrapped__, self.session, self.created)
##~~ User object to use when global api key is used to access the API
class ApiUser(User):
def __init__(self, groups):
User.__init__(self, "_api", "", True, [], groups)
##~~ User object to use when access control is disabled
class AdminUser(User):
def __init__(self, groups):
User.__init__(self, "_admin", "", True, [], groups)
| agpl-3.0 | -4,884,698,765,578,790,000 | 30.650943 | 214 | 0.663214 | false | 3.502697 | false | false | false |
QuantEcon/QuantEcon.py | quantecon/quad.py | 1 | 31180 | """
Defining various quadrature routines.
Based on the quadrature routines found in the CompEcon toolbox by
Miranda and Fackler.
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational Economics
and Finance, MIT Press, 2002.
"""
import math
import numpy as np
import scipy.linalg as la
from numba import jit, vectorize
from .ce_util import ckron, gridmake
from .util import check_random_state
__all__ = ['qnwcheb', 'qnwequi', 'qnwlege', 'qnwnorm', 'qnwlogn',
'qnwsimp', 'qnwtrap', 'qnwunif', 'quadrect', 'qnwbeta',
'qnwgamma']
@vectorize(nopython=True)
def gammaln(x):
return math.lgamma(x)
@vectorize(nopython=True)
def fix(x):
if x < 0:
return math.ceil(x)
else:
return math.floor(x)
# ------------------ #
# Exported Functions #
# ------------------ #
def qnwcheb(n, a=1, b=1):
"""
    Computes multivariate Gauss-Chebyshev quadrature nodes and weights.
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwcheb`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
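
    Examples
    --------
    A small usage sketch (the value in the comment is approximate):

    >>> import numpy as np
    >>> nodes, weights = qnwcheb(15, 0.0, 1.0)
    >>> approx = weights @ np.exp(nodes)  # ~ np.e - 1, the integral of exp on [0, 1]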
"""
return _make_multidim_func(_qnwcheb1, n, a, b)
def qnwequi(n, a, b, kind="N", equidist_pp=None, random_state=None):
"""
Generates equidistributed sequences with property that averages
value of integrable function evaluated over the sequence converges
to the integral as n goes to infinity.
Parameters
----------
n : int
Number of sequence points
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
kind : string, optional(default="N")
One of the following:
        - N - Niederreiter (default)
- W - Weyl
- H - Haber
- R - pseudo Random
    equidist_pp : array_like, optional(default=None)
        Array of prime square roots used to generate the Weyl and Haber
        sequences. If None, the square roots of the primes below 7920 are
        used.
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwequi`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
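
    Examples
    --------
    A small usage sketch: estimate the integral of x * y over the unit
    square with a Niederreiter sequence (the exact value is 0.25):

    >>> nodes, weights = qnwequi(10000, [0, 0], [1, 1], kind="N")
    >>> approx = weights @ (nodes[:, 0] * nodes[:, 1])  # ~ 0.25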
"""
random_state = check_random_state(random_state)
if equidist_pp is None:
import sympy as sym
equidist_pp = np.sqrt(np.array(list(sym.primerange(0, 7920))))
n, a, b = list(map(np.atleast_1d, list(map(np.asarray, [n, a, b]))))
d = max(list(map(len, [n, a, b])))
n = np.prod(n)
if a.size == 1:
a = np.repeat(a, d)
if b.size == 1:
b = np.repeat(b, d)
i = np.arange(1, n + 1)
if kind.upper() == "N": # Neiderreiter
j = 2.0 ** (np.arange(1, d+1) / (d+1))
nodes = np.outer(i, j)
nodes = (nodes - fix(nodes)).squeeze()
elif kind.upper() == "W": # Weyl
j = equidist_pp[:d]
nodes = np.outer(i, j)
nodes = (nodes - fix(nodes)).squeeze()
elif kind.upper() == "H": # Haber
j = equidist_pp[:d]
nodes = np.outer(i * (i+1) / 2, j)
nodes = (nodes - fix(nodes)).squeeze()
elif kind.upper() == "R": # pseudo-random
nodes = random_state.rand(n, d).squeeze()
else:
raise ValueError("Unknown sequence requested")
# compute nodes and weights
r = b - a
nodes = a + nodes * r
weights = (np.prod(r) / n) * np.ones(n)
return nodes, weights
def qnwlege(n, a, b):
"""
    Computes multivariate Gauss-Legendre quadrature nodes and weights.
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwlege`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
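
    Examples
    --------
    A small usage sketch; with n nodes the rule is exact for polynomials of
    degree up to 2n - 1:

    >>> nodes, weights = qnwlege(3, -1.0, 1.0)
    >>> approx = weights @ nodes**4  # = 2/5, the integral of x**4 on [-1, 1]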
"""
return _make_multidim_func(_qnwlege1, n, a, b)
def qnwnorm(n, mu=None, sig2=None, usesqrtm=False):
"""
Computes nodes and weights for multivariate normal distribution
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
mu : scalar or array_like(float), optional(default=zeros(d))
The means of each dimension of the random variable. If a scalar
is given, that constant is repeated d times, where d is the
number of dimensions
sig2 : array_like(float), optional(default=eye(d))
A d x d array representing the variance-covariance matrix of the
multivariate normal distribution.
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwnorm`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
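
    Examples
    --------
    A small usage sketch: approximate E[exp(X)] for X ~ N(0, 1) and compare
    with the closed form exp(1/2):

    >>> import numpy as np
    >>> nodes, weights = qnwnorm(10)
    >>> approx = weights @ np.exp(nodes)  # ~ np.exp(0.5)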
"""
n = np.atleast_1d(n)
d = n.size
if mu is None:
mu = np.zeros(d)
else:
mu = np.atleast_1d(mu)
if sig2 is None:
sig2 = np.eye(d)
else:
sig2 = np.atleast_1d(sig2).reshape(d, d)
if all([x.size == 1 for x in [n, mu, sig2]]):
nodes, weights = _qnwnorm1(n[0])
else:
nodes = []
weights = []
for i in range(d):
_1d = _qnwnorm1(n[i])
nodes.append(_1d[0])
weights.append(_1d[1])
nodes = gridmake(*nodes)
weights = ckron(*weights[::-1])
if usesqrtm:
new_sig2 = la.sqrtm(sig2)
else: # cholesky
new_sig2 = la.cholesky(sig2)
if d > 1:
nodes = nodes.dot(new_sig2) + mu # Broadcast ok
else: # nodes.dot(sig) will not be aligned in scalar case.
nodes = nodes * new_sig2 + mu
return nodes.squeeze(), weights
def qnwlogn(n, mu=None, sig2=None):
"""
Computes nodes and weights for multivariate lognormal distribution
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
mu : scalar or array_like(float), optional(default=zeros(d))
The means of each dimension of the random variable. If a scalar
is given, that constant is repeated d times, where d is the
number of dimensions
sig2 : array_like(float), optional(default=eye(d))
A d x d array representing the variance-covariance matrix of the
multivariate normal distribution.
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwlogn`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
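
    Examples
    --------
    A small usage sketch: the mean of a lognormal random variable with
    parameters mu and sig2 is exp(mu + sig2 / 2):

    >>> nodes, weights = qnwlogn(15, mu=0.0, sig2=0.25)
    >>> approx = weights @ nodes  # ~ exp(0.125)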
"""
nodes, weights = qnwnorm(n, mu, sig2)
return np.exp(nodes), weights
def qnwsimp(n, a, b):
"""
Computes multivariate Simpson quadrature nodes and weights.
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwsimp`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
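
    Examples
    --------
    A small usage sketch; Simpson's rule integrates cubics exactly:

    >>> nodes, weights = qnwsimp(11, 0.0, 1.0)
    >>> approx = weights @ nodes**3  # = 0.25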
"""
return _make_multidim_func(_qnwsimp1, n, a, b)
def qnwtrap(n, a, b):
"""
Computes multivariate trapezoid rule quadrature nodes and weights.
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwtrap`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
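
    Examples
    --------
    A small usage sketch (the value in the comment is approximate):

    >>> import numpy as np
    >>> nodes, weights = qnwtrap(101, 0.0, 1.0)
    >>> approx = weights @ np.sqrt(nodes)  # ~ 2/3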
"""
return _make_multidim_func(_qnwtrap1, n, a, b)
def qnwunif(n, a, b):
"""
Computes quadrature nodes and weights for multivariate uniform
distribution
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwunif`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
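
    Examples
    --------
    A small usage sketch: the weights sum to one, so ``weights @ f(nodes)``
    approximates E[f(X)] for X uniform on [a, b]:

    >>> nodes, weights = qnwunif(11, 0.0, 1.0)
    >>> approx = weights @ nodes  # ~ 0.5, the mean of U(0, 1)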
"""
n, a, b = list(map(np.asarray, [n, a, b]))
nodes, weights = qnwlege(n, a, b)
weights = weights / np.prod(b - a)
return nodes, weights
def quadrect(f, n, a, b, kind='lege', *args, **kwargs):
"""
Integrate the d-dimensional function f on a rectangle with lower and
upper bound for dimension i defined by a[i] and b[i], respectively;
using n[i] points.
Parameters
----------
f : function
The function to integrate over. This should be a function
that accepts as its first argument a matrix representing points
along each dimension (each dimension is a column). Other
arguments that need to be passed to the function are caught by
`*args` and `**kwargs`
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
kind : string, optional(default='lege')
Specifies which type of integration to perform. Valid
values are:
lege - Gauss-Legendre
cheb - Gauss-Chebyshev
trap - trapezoid rule
simp - Simpson rule
        N - Niederreiter equidistributed sequence
W - Weyl equidistributed sequence
H - Haber equidistributed sequence
R - Monte Carlo
*args, **kwargs :
Other arguments passed to the function f
Returns
-------
out : scalar (float)
The value of the integral on the region [a, b]
Notes
-----
    Based on original function ``quadrect`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
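
    Examples
    --------
    A small usage sketch: integrate f(x, y) = x * y over the unit square
    (the exact value is 0.25):

    >>> f = lambda x: x[:, 0] * x[:, 1]
    >>> approx = quadrect(f, [11, 11], [0.0, 0.0], [1.0, 1.0])  # ~ 0.25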
"""
if kind.lower() == "lege":
nodes, weights = qnwlege(n, a, b)
elif kind.lower() == "cheb":
nodes, weights = qnwcheb(n, a, b)
elif kind.lower() == "trap":
nodes, weights = qnwtrap(n, a, b)
elif kind.lower() == "simp":
nodes, weights = qnwsimp(n, a, b)
else:
nodes, weights = qnwequi(n, a, b, kind)
out = weights.dot(f(nodes, *args, **kwargs))
return out
def qnwbeta(n, a=1.0, b=1.0):
"""
Computes nodes and weights for beta distribution
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
    a : scalar or array_like(float), optional(default=1.0)
        A length-d iterable of first Beta distribution shape parameters. If a
        scalar is given, that constant is repeated d times, where d is the
        number of dimensions
    b : scalar or array_like(float), optional(default=1.0)
        A length-d iterable of second Beta distribution shape parameters. If
        a scalar is given, that constant is repeated d times, where d is the
        number of dimensions
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwbeta`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
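
    Examples
    --------
    A small usage sketch: the weights sum to one, so ``weights @ nodes``
    approximates the mean a / (a + b) of a Beta(a, b) random variable:

    >>> nodes, weights = qnwbeta(10, 2.0, 3.0)
    >>> approx = weights @ nodes  # ~ 0.4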
"""
return _make_multidim_func(_qnwbeta1, n, a, b)
def qnwgamma(n, a=1.0, b=1.0, tol=3e-14):
"""
Computes nodes and weights for gamma distribution
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float) : optional(default=ones(d))
Shape parameter of the gamma distribution parameter. Must be positive
b : scalar or array_like(float) : optional(default=ones(d))
Scale parameter of the gamma distribution parameter. Must be positive
tol : scalar or array_like(float) : optional(default=ones(d) * 3e-14)
Tolerance parameter for newton iterations for each node
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwgamma`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
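
    Examples
    --------
    A small usage sketch: the mean of a Gamma random variable with shape a
    and scale b is a * b:

    >>> nodes, weights = qnwgamma(25, 2.0, 0.5)
    >>> approx = weights @ nodes  # ~ 1.0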
"""
return _make_multidim_func(_qnwgamma1, n, a, b, tol)
# ------------------ #
# Internal Functions #
# ------------------ #
def _make_multidim_func(one_d_func, n, *args):
"""
A helper function to cut down on code repetition. Almost all of the
code in qnwcheb, qnwlege, qnwsimp, qnwtrap is just dealing
various forms of input arguments and then shelling out to the
corresponding 1d version of the function.
This routine does all the argument checking and passes things
through the appropriate 1d function before using a tensor product
to combine weights and nodes.
Parameters
----------
one_d_func : function
The 1d function to be called along each dimension
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
args :
These are the arguments to various qnw____ functions. For the
majority of the functions this is just a and b, but some differ.
Returns
-------
func : function
The multi-dimensional version of the parameter ``one_d_func``
"""
_args = list(args)
n = np.atleast_1d(n)
args = list(map(np.atleast_1d, _args))
if all([x.size == 1 for x in [n] + args]):
return one_d_func(n[0], *_args)
d = n.size
for i in range(len(args)):
if args[i].size == 1:
args[i] = np.repeat(args[i], d)
nodes = []
weights = []
for i in range(d):
ai = [x[i] for x in args]
_1d = one_d_func(n[i], *ai)
nodes.append(_1d[0])
weights.append(_1d[1])
weights = ckron(*weights[::-1]) # reverse ordered tensor product
nodes = gridmake(*nodes)
return nodes, weights
@jit(nopython=True)
def _qnwcheb1(n, a, b):
"""
    Compute univariate Gauss-Chebyshev quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
    Based on original function ``qnwcheb1`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
nodes = (b+a)/2 - (b-a)/2 * np.cos(np.pi/n * np.linspace(0.5, n-0.5, n))
# Create temporary arrays to be used in computing weights
t1 = np.arange(1, n+1) - 0.5
t2 = np.arange(0.0, n, 2)
t3 = np.concatenate((np.array([1.0]),
-2.0/(np.arange(1.0, n-1, 2)*np.arange(3.0, n+1, 2))))
# compute weights and return
weights = ((b-a)/n)*np.cos(np.pi/n*np.outer(t1, t2)) @ t3
return nodes, weights
@jit(nopython=True)
def _qnwlege1(n, a, b):
"""
    Compute univariate Gauss-Legendre quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
    Based on original function ``qnwlege1`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
# import ipdb; ipdb.set_trace()
maxit = 100
m = int(fix((n + 1) / 2.0))
xm = 0.5 * (b + a)
xl = 0.5 * (b - a)
nodes = np.zeros(n)
weights = nodes.copy()
i = np.arange(m)
z = np.cos(np.pi * ((i + 1.0) - 0.25) / (n + 0.5))
for its in range(maxit):
p1 = np.ones_like(z)
p2 = np.zeros_like(z)
for j in range(1, n+1):
p3 = p2
p2 = p1
p1 = ((2 * j - 1) * z * p2 - (j - 1) * p3) / j
# https://github.com/QuantEcon/QuantEcon.py/issues/530
top = n * (z * p1 - p2)
bottom = z ** 2 - 1.0
pp = top / bottom
z1 = z.copy()
z = z1 - p1/pp
if np.all(np.abs(z - z1) < 1e-14):
break
if its == maxit - 1:
raise ValueError("Maximum iterations in _qnwlege1")
nodes[i] = xm - xl * z
nodes[- i - 1] = xm + xl * z
# https://github.com/QuantEcon/QuantEcon.py/issues/530
weights[i] = 2 * xl / ((1 - z ** 2) * pp * pp)
weights[- i - 1] = weights[i]
return nodes, weights
@jit(nopython=True)
def _qnwnorm1(n):
"""
Compute nodes and weights for quadrature of univariate standard
normal distribution
Parameters
----------
n : int
The number of nodes
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
    Based on original function ``qnwnorm1`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
maxit = 100
pim4 = 1 / np.pi**(0.25)
m = int(fix((n + 1) / 2))
nodes = np.zeros(n)
weights = np.zeros(n)
for i in range(m):
if i == 0:
z = np.sqrt(2*n+1) - 1.85575 * ((2 * n + 1)**(-1 / 6.1))
elif i == 1:
z = z - 1.14 * (n ** 0.426) / z
elif i == 2:
z = 1.86 * z + 0.86 * nodes[0]
elif i == 3:
z = 1.91 * z + 0.91 * nodes[1]
else:
z = 2 * z + nodes[i-2]
its = 0
while its < maxit:
its += 1
p1 = pim4
p2 = 0
for j in range(1, n+1):
p3 = p2
p2 = p1
p1 = z * math.sqrt(2.0/j) * p2 - math.sqrt((j - 1.0) / j) * p3
pp = math.sqrt(2 * n) * p2
z1 = z
z = z1 - p1/pp
if abs(z - z1) < 1e-14:
break
if its == maxit:
raise ValueError("Failed to converge in _qnwnorm1")
nodes[n - 1 - i] = z
nodes[i] = -z
weights[i] = 2 / (pp*pp)
weights[n - 1 - i] = weights[i]
weights /= math.sqrt(math.pi)
nodes = nodes * math.sqrt(2.0)
return nodes, weights
@jit(nopython=True)
def _qnwsimp1(n, a, b):
"""
Compute univariate Simpson quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
    Based on original function ``qnwsimp1`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
if n % 2 == 0:
print("WARNING qnwsimp: n must be an odd integer. Increasing by 1")
n += 1
nodes = np.linspace(a, b, n)
dx = nodes[1] - nodes[0]
weights = np.kron(np.ones((n+1) // 2), np.array([2.0, 4.0]))
weights = weights[:n]
weights[0] = weights[-1] = 1
weights = (dx / 3.0) * weights
return nodes, weights
@jit(nopython=True)
def _qnwtrap1(n, a, b):
"""
Compute univariate trapezoid rule quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
    Based on original function ``qnwtrap1`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
if n < 1:
raise ValueError("n must be at least one")
nodes = np.linspace(a, b, n)
dx = nodes[1] - nodes[0]
weights = dx * np.ones(n)
weights[0] *= 0.5
weights[-1] *= 0.5
return nodes, weights
@jit(nopython=True)
def _qnwbeta1(n, a=1.0, b=1.0):
"""
Computes nodes and weights for quadrature on the beta distribution.
Default is a=b=1 which is just a uniform distribution
NOTE: For now I am just following compecon; would be much better to
find a different way since I don't know what they are doing.
Parameters
----------
n : scalar : int
The number of quadrature points
a : scalar : float, optional(default=1)
First Beta distribution parameter
b : scalar : float, optional(default=1)
Second Beta distribution parameter
Returns
-------
nodes : np.ndarray(dtype=float, ndim=1)
The quadrature points
weights : np.ndarray(dtype=float, ndim=1)
The quadrature weights that correspond to nodes
Notes
-----
    Based on original function ``_qnwbeta1`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
# We subtract one and write a + 1 where we actually want a, and a
# where we want a - 1
a = a - 1
b = b - 1
maxiter = 25
# Allocate empty space
nodes = np.zeros(n)
weights = np.zeros(n)
# Find "reasonable" starting values. Why these numbers?
for i in range(n):
if i == 0:
an = a/n
bn = b/n
r1 = (1+a) * (2.78/(4+n*n) + .768*an/n)
r2 = 1 + 1.48*an + .96*bn + .452*an*an + .83*an*bn
z = 1 - r1/r2
elif i == 1:
r1 = (4.1+a) / ((1+a)*(1+0.156*a))
r2 = 1 + 0.06 * (n-8) * (1+0.12*a)/n
r3 = 1 + 0.012*b * (1+0.25*abs(a))/n
z = z - (1-z) * r1 * r2 * r3
elif i == 2:
r1 = (1.67+0.28*a)/(1+0.37*a)
r2 = 1+0.22*(n-8)/n
r3 = 1+8*b/((6.28+b)*n*n)
z = z-(nodes[0]-z)*r1*r2*r3
elif i == n - 2:
r1 = (1+0.235*b)/(0.766+0.119*b)
r2 = 1/(1+0.639*(n-4)/(1+0.71*(n-4)))
r3 = 1/(1+20*a/((7.5+a)*n*n))
z = z+(z-nodes[-4])*r1*r2*r3
elif i == n - 1:
r1 = (1+0.37*b) / (1.67+0.28*b)
r2 = 1 / (1+0.22*(n-8)/n)
r3 = 1 / (1+8*a/((6.28+a)*n*n))
z = z+(z-nodes[-3])*r1*r2*r3
else:
z = 3*nodes[i-1] - 3*nodes[i-2] + nodes[i-3]
ab = a+b
# Root finding
its = 0
z1 = -100
while abs(z - z1) > 1e-10 and its < maxiter:
temp = 2 + ab
p1 = (a-b + temp*z)/2
p2 = 1
for j in range(2, n+1):
p3 = p2
p2 = p1
temp = 2*j + ab
aa = 2*j * (j+ab)*(temp-2)
bb = (temp-1) * (a*a - b*b + temp*(temp-2) * z)
c = 2 * (j - 1 + a) * (j - 1 + b) * temp
p1 = (bb*p2 - c*p3)/aa
pp = (n*(a-b-temp*z) * p1 + 2*(n+a)*(n+b)*p2)/(temp*(1 - z*z))
z1 = z
z = z1 - p1/pp
if abs(z - z1) < 1e-12:
break
its += 1
if its == maxiter:
raise ValueError("Max Iteration reached. Failed to converge")
nodes[i] = z
weights[i] = temp/(pp*p2)
nodes = (1-nodes)/2
weights = weights * math.exp(gammaln(a+n) + gammaln(b+n) -
gammaln(n+1) - gammaln(n+ab+1))
weights = weights / (2*math.exp(gammaln(a+1) + gammaln(b+1) -
gammaln(ab+2)))
return nodes, weights
@jit(nopython=True)
def _qnwgamma1(n, a=1.0, b=1.0, tol=3e-14):
"""
1d quadrature weights and nodes for Gamma distributed random variable
Parameters
----------
n : scalar : int
The number of quadrature points
a : scalar : float, optional(default=1.0)
Shape parameter of the gamma distribution parameter. Must be positive
b : scalar : float, optional(default=1.0)
Scale parameter of the gamma distribution parameter. Must be positive
tol : scalar : float, optional(default=3e-14)
Tolerance parameter for newton iterations for each node
Returns
-------
nodes : np.ndarray(dtype=float, ndim=1)
The quadrature points
weights : np.ndarray(dtype=float, ndim=1)
The quadrature weights that correspond to nodes
Notes
-----
    Based on original function ``qnwgamma1`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
a -= 1
maxit = 25
factor = -math.exp(gammaln(a+n) - gammaln(n) - gammaln(a+1))
nodes = np.zeros(n)
weights = np.zeros(n)
# Create nodes
for i in range(n):
# Reasonable starting values
if i == 0:
z = (1+a) * (3+0.92*a) / (1 + 2.4*n + 1.8*a)
elif i == 1:
z = z + (15 + 6.25*a) / (1 + 0.9*a + 2.5*n)
else:
j = i-1
z = z + ((1 + 2.55*j) / (1.9*j) + 1.26*j*a / (1 + 3.5*j)) * \
(z - nodes[j-1]) / (1 + 0.3*a)
# root finding iterations
its = 0
z1 = -10000
while abs(z - z1) > tol and its < maxit:
p1 = 1.0
p2 = 0.0
for j in range(1, n+1):
                # Recurrence relation for Laguerre polynomials
p3 = p2
p2 = p1
p1 = ((2*j - 1 + a - z)*p2 - (j - 1 + a)*p3) / j
pp = (n*p1 - (n+a)*p2) / z
z1 = z
z = z1 - p1/pp
its += 1
if its == maxit:
raise ValueError('Failure to converge')
nodes[i] = z
weights[i] = factor / (pp*n*p2)
return nodes*b, weights
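# Hedged usage sketch (not part of the original module): the gamma quadrature can
# be checked the same way; for shape ``a`` and scale ``b`` the mean of the Gamma
# distribution is a*b. The demo helper name below is hypothetical.
def _demo_qnwgamma1(n=10, a=2.0, b=0.5):
    nodes, weights = _qnwgamma1(n, a, b)
    return np.dot(weights, nodes)  # should be close to a*b = 1.0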
| bsd-3-clause | -5,027,179,250,230,418,000 | 25.135792 | 78 | 0.56982 | false | 3.475256 | false | false | false |
mosbys/Clone | Cloning_v1/drive.py | 1 | 3838 | import argparse
import base64
import json
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from PIL import ImageOps
from flask import Flask, render_template
from io import BytesIO
from random import randint
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array
import cv2
# Fix error with Keras and TensorFlow
import tensorflow as tf
import matplotlib.pyplot as plt
tf.python.control_flow_ops = tf
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
iDebug = 0
def preprocess(image, top_offset=.375, bottom_offset=.125):
"""
    Applies the preprocessing pipeline to an image: crops `top_offset` and
    `bottom_offset` portions of the image and resizes the cropped image to half
    its height and width (no pixel-value rescaling is performed here).
"""
top = int(top_offset * image.shape[0])
bottom = int(bottom_offset * image.shape[0])
image = image[top:-bottom, :]
newShape = image.shape
    image = cv2.resize(image, (int(newShape[1]/2), int(newShape[0]/2)), interpolation=cv2.INTER_CUBIC)
return image
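# Hedged sketch (not part of the original script): assuming the Udacity simulator's
# 160x320x3 camera frames, preprocess() drops 60 rows from the top and 20 from the
# bottom, then halves the resolution, yielding a 40x160x3 array. The helper name is
# hypothetical.
def _preprocess_shape_demo():
    dummy_frame = np.zeros((160, 320, 3), dtype=np.uint8)  # stand-in for a camera frame
    return preprocess(dummy_frame).shape  # expected: (40, 160, 3)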
@sio.on('telemetry')
def telemetry(sid, data):
# The current steering angle of the car
steering_angle = data["steering_angle"]
# The current throttle of the car
throttle = data["throttle"]
# The current speed of the car
speed = data["speed"]
# The current image from the center camera of the car
imgString = data["image"]
image = Image.open(BytesIO(base64.b64decode(imgString)))
image_array = np.asarray(image)
image_array=preprocess(image_array)
newShape = image_array.shape
#image_array=cv2.resize(image_array,(newShape[1], newShape[0]),interpolation=cv2.INTER_CUBIC)
transformed_image_array = image_array[None, :, :, :]
if (iDebug==1):
plt.imshow(image_array)
plt.show()
#transformed_image_array2 = np.zeros([1,2*64,64,3])
#transformed_image_array2[0]=cv2.resize(transformed_image_array[0],(2*64, 64),interpolation=cv2.INTER_CUBIC)
# This model currently assumes that the features of the model are just the images. Feel free to change this.
steering_angle = float(model.predict(transformed_image_array, batch_size=1))
# The driving model currently just outputs a constant throttle. Feel free to edit this.
#steering_angle = randint(0,100)/100*randint(-1,1);
throttle = 0.2
print(steering_angle, throttle)
send_control(steering_angle, throttle)
@sio.on('connect')
def connect(sid, environ):
print("connect ", sid)
send_control(0, 0)
def send_control(steering_angle, throttle):
sio.emit("steer", data={
'steering_angle': steering_angle.__str__(),
'throttle': throttle.__str__()
}, skip_sid=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument('model', type=str,
help='Path to model definition json. Model weights should be on the same path.')
args = parser.parse_args()
with open(args.model, 'r') as jfile:
# NOTE: if you saved the file by calling json.dump(model.to_json(), ...)
# then you will have to call:
#
# model = model_from_json(json.loads(jfile.read()))\
#
# instead.
#model = model_from_json(jfile.read())
model = model_from_json(json.loads(jfile.read()))
model.compile("adam", "mse")
weights_file = args.model.replace('json', 'h5')
model.load_weights(weights_file)
# wrap Flask application with engineio's middleware
app = socketio.Middleware(sio, app)
# deploy as an eventlet WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app) | gpl-2.0 | 8,273,103,825,005,780,000 | 32.285714 | 112 | 0.663627 | false | 3.524334 | false | false | false |
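    # Hedged usage note (not part of the original script): assuming a model saved
    # as model.json with matching model.h5 weights, the server would be started as
    # ``python drive.py model.json``; the simulator's autonomous mode then connects
    # on port 4567.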
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.