| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses (1 value) | stringclasses (15 values) | int32 2-1.05M |
from .binary_search_tree import BinarySearchTree
| dskoda1/hpds | hpds/trees/__init__.py | Python | bsd-2-clause | 49 |
from pytest import mark
from django.urls import reverse
from email_template.models import Email
from assopy.models import AssopyUser
from conference.accounts import PRIVACY_POLICY_CHECKBOX, PRIVACY_POLICY_ERROR
from conference.models import CaptchaQuestion
from conference.users import RANDOM_USERNAME_LENGTH
from tests.common_tools import make_user, redirects_to, template_used, create_homepage_in_cms
SIGNUP_SUCCESFUL_302 = 302
SIGNUP_FAILED_200 = 200
login_url = reverse("accounts:login")
def check_login(client, email):
"Small helper for tests to check if login works correctly"
response = client.post(
login_url,
{
"email": email,
"password": "password",
"i_accept_privacy_policy": True,
},
)
# redirect means successful login, 200 means errors on form
LOGIN_SUCCESFUL_302 = 302
assert response.status_code == LOGIN_SUCCESFUL_302
return True
def activate_only_user():
user = AssopyUser.objects.get()
user.user.is_active = True
user.user.save()
@mark.django_db
def test_user_registration(client):
"""
Tests if users can create a new account on the website
(to buy tickets, etc.).
"""
# required for redirects to /
create_homepage_in_cms()
# 1. test if user can create new account
sign_up_url = reverse("accounts:signup_step_1_create_account")
response = client.get(sign_up_url)
assert response.status_code == 200
assert template_used(response, "conference/accounts/signup.html")
assert template_used(response, "conference/accounts/_login_with_google.html")
assert template_used(response, "conference/base.html")
assert PRIVACY_POLICY_CHECKBOX in response.content.decode("utf-8")
assert AssopyUser.objects.all().count() == 0
response = client.post(
sign_up_url,
{
"first_name": "Joe",
"last_name": "Doe",
"email": "[email protected]",
"password1": "password",
"password2": "password",
},
follow=True,
)
assert response.status_code == SIGNUP_FAILED_200
assert "/privacy/" in PRIVACY_POLICY_CHECKBOX
assert "I consent to the use of my data" in PRIVACY_POLICY_CHECKBOX
assert response.context["form"].errors["__all__"] == [PRIVACY_POLICY_ERROR]
response = client.post(
sign_up_url,
{
"first_name": "Joe",
"last_name": "Doe",
"email": "[email protected]",
"password1": "password",
"password2": "password",
"i_accept_privacy_policy": True,
},
follow=True,
)
# check if redirect was correct
assert template_used(
response, "conference/accounts/signup_please_verify_email.html"
)
assert template_used(response, "conference/base.html")
user = AssopyUser.objects.get()
assert user.name() == "Joe Doe"
assert user.user.is_active is False
# check if the random username was generated
assert len(user.user.username) == RANDOM_USERNAME_LENGTH
is_logged_in = client.login(
email="[email protected]", password="password"
)
assert is_logged_in is False # user is inactive
response = client.get("/")
assert template_used(response, "conference/homepage/home_template.html")
assert "Joe Doe" not in response.content.decode("utf-8")
assert "Log out" not in response.content.decode("utf-8")
# enable the user
user.user.is_active = True
user.user.save()
is_logged_in = client.login(
email="[email protected]", password="password"
)
assert is_logged_in
response = client.get("/")
assert template_used(response, "conference/homepage/home_template.html")
# checking if user is logged in.
assert "Joe Doe" in response.content.decode("utf-8")
@mark.django_db
def test_393_emails_are_lowercased_and_login_is_case_insensitive(client):
"""
https://github.com/EuroPython/epcon/issues/393
Test if we can register a new account when using the same email with
a different case.
"""
sign_up_url = reverse("accounts:signup_step_1_create_account")
response = client.post(
sign_up_url,
{
"first_name": "Joe",
"last_name": "Doe",
"email": "[email protected]",
"password1": "password",
"password2": "password",
"i_accept_privacy_policy": True,
},
)
assert response.status_code == SIGNUP_SUCCESFUL_302
user = AssopyUser.objects.get()
assert user.name() == "Joe Doe"
assert user.user.email == "[email protected]"
response = client.post(
sign_up_url,
{
"first_name": "Joe",
"last_name": "Doe",
"email": "[email protected]",
"password1": "password",
"password2": "password",
"i_accept_privacy_policy": True,
},
)
assert response.status_code == SIGNUP_FAILED_200
assert response.context["form"].errors["email"] == ["Email already in use"]
user = AssopyUser.objects.get() # still only one user
assert user.name() == "Joe Doe"
assert user.user.email == "[email protected]"
# activate user so we can log in
user.user.is_active = True
user.user.save()
# check if we can login with lowercase
# the emails are lowercased in the db, but the user is still able to log in
# using whatever case they want
assert check_login(client, email="[email protected]")
assert check_login(client, email="[email protected]")
assert check_login(client, email="[email protected]")
assert check_login(client, email="[email protected]")
@mark.django_db
def test_703_test_captcha_questions(client):
"""
https://github.com/EuroPython/epcon/issues/703
"""
QUESTION = "Can you foo in Python?"
ANSWER = "Yes you can"
CaptchaQuestion.objects.create(question=QUESTION, answer=ANSWER)
Email.objects.create(code="verify-account")
sign_up_url = reverse("accounts:signup_step_1_create_account")
response = client.get(sign_up_url)
# the question appears twice: in captcha_question.initial and in captcha_answer.label
assert "captcha_question" in response.content.decode("utf-8")
assert "captcha_answer" in response.content.decode("utf-8")
assert response.content.decode("utf-8").count(QUESTION) == 2
response = client.post(
sign_up_url,
{
"first_name": "Joe",
"last_name": "Doe",
"email": "[email protected]",
"password1": "password",
"password2": "password",
"i_accept_privacy_policy": True,
},
)
assert response.status_code == SIGNUP_FAILED_200 # because missing captcha
response = client.post(
sign_up_url,
{
"first_name": "Joe",
"last_name": "Doe",
"email": "[email protected]",
"password1": "password",
"password2": "password",
"captcha_question": QUESTION,
"captcha_answer": "No you can't",
"i_accept_privacy_policy": True,
},
)
assert response.status_code == SIGNUP_FAILED_200 # because wrong answer
wrong_answer = ["Sorry, that's a wrong answer"]
assert response.context["form"].errors["captcha_answer"] == wrong_answer
response = client.post(
sign_up_url,
{
"first_name": "Joe",
"last_name": "Doe",
"email": "[email protected]",
"password1": "password",
"password2": "password",
"captcha_question": QUESTION,
"captcha_answer": ANSWER,
"i_accept_privacy_policy": True,
},
)
assert response.status_code == SIGNUP_SUCCESFUL_302
activate_only_user()
assert check_login(client, email="[email protected]")
# if there are no enabled questions they don't appear on the form
CaptchaQuestion.objects.update(enabled=False)
response = client.get(sign_up_url)
assert "captcha_question" not in response.content.decode("utf-8")
assert "captcha_answer" not in response.content.decode("utf-8")
assert response.content.decode("utf-8").count(QUESTION) == 0
@mark.django_db
def test_872_login_redirects_to_user_dashboard(client):
u = make_user(email='[email protected]', password='foobar')
response = client.post(
login_url,
{
"email": u.email,
"password": 'foobar',
"i_accept_privacy_policy": True,
},
)
assert response.status_code == 302
assert redirects_to(response, "/user-panel/")
| EuroPython/epcon | tests/test_user_login_and_registration.py | Python | bsd-2-clause | 8,704 |
# -*- encoding: utf-8 -*-
import mock
import os
from shutil import rmtree
from tempfile import mkdtemp
from django.test import TestCase
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test.utils import override_settings
from django.template.base import TemplateDoesNotExist
from paperclip.models import Attachment
from geotrek.common.models import Organism, FileType
from geotrek.common.parsers import ExcelParser, AttachmentParserMixin
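# Minimal parser subclasses, referenced by dotted path from the
# call_command('import', ...) tests below.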
class OrganismParser(ExcelParser):
model = Organism
fields = {'organism': 'nOm'}
class OrganismEidParser(ExcelParser):
model = Organism
fields = {'organism': 'nOm'}
eid = 'organism'
class AttachmentParser(AttachmentParserMixin, OrganismEidParser):
non_fields = {'attachments': 'photo'}
class ParserTests(TestCase):
def test_bad_parser_class(self):
with self.assertRaises(CommandError) as cm:
call_command('import', 'geotrek.common.DoesNotExist', '', verbosity=0)
self.assertEqual(unicode(cm.exception), u"Failed to import parser class 'geotrek.common.DoesNotExist'")
def test_bad_filename(self):
with self.assertRaises(CommandError) as cm:
call_command('import', 'geotrek.common.tests.test_parsers.OrganismParser', 'find_me/I_am_not_there.shp', verbosity=0)
self.assertEqual(unicode(cm.exception), u"File does not exists at: find_me/I_am_not_there.shp")
def test_create(self):
filename = os.path.join(os.path.dirname(__file__), 'data', 'organism.xls')
call_command('import', 'geotrek.common.tests.test_parsers.OrganismParser', filename, verbosity=0)
self.assertEqual(Organism.objects.count(), 1)
organism = Organism.objects.get()
self.assertEqual(organism.organism, u"Comité Théodule")
def test_duplicate_without_eid(self):
filename = os.path.join(os.path.dirname(__file__), 'data', 'organism.xls')
call_command('import', 'geotrek.common.tests.test_parsers.OrganismParser', filename, verbosity=0)
call_command('import', 'geotrek.common.tests.test_parsers.OrganismParser', filename, verbosity=0)
self.assertEqual(Organism.objects.count(), 2)
def test_unmodified_with_eid(self):
filename = os.path.join(os.path.dirname(__file__), 'data', 'organism.xls')
call_command('import', 'geotrek.common.tests.test_parsers.OrganismEidParser', filename, verbosity=0)
call_command('import', 'geotrek.common.tests.test_parsers.OrganismEidParser', filename, verbosity=0)
self.assertEqual(Organism.objects.count(), 1)
def test_updated_with_eid(self):
filename = os.path.join(os.path.dirname(__file__), 'data', 'organism.xls')
filename2 = os.path.join(os.path.dirname(__file__), 'data', 'organism2.xls')
call_command('import', 'geotrek.common.tests.test_parsers.OrganismEidParser', filename, verbosity=0)
call_command('import', 'geotrek.common.tests.test_parsers.OrganismEidParser', filename2, verbosity=0)
self.assertEqual(Organism.objects.count(), 2)
organisms = Organism.objects.order_by('pk')
self.assertEqual(organisms[0].organism, u"Comité Théodule")
self.assertEqual(organisms[1].organism, u"Comité Hippolyte")
def test_report_format_text(self):
parser = OrganismParser()
self.assertRegexpMatches(parser.report(), '0/0 lines imported.')
self.assertNotRegexpMatches(parser.report(), '<div id=\"collapse-\$celery_id\" class=\"collapse\">')
def test_report_format_html(self):
parser = OrganismParser()
self.assertRegexpMatches(parser.report(output_format='html'), '<div id=\"collapse-\$celery_id\" class=\"collapse\">')
def test_report_format_bad(self):
parser = OrganismParser()
with self.assertRaises(TemplateDoesNotExist):
parser.report(output_format='toto')
@override_settings(MEDIA_ROOT=mkdtemp('geotrek_test'))
class AttachmentParserTests(TestCase):
def setUp(self):
self.filetype = FileType.objects.create(type=u"Photographie")
def tearDown(self):
rmtree(settings.MEDIA_ROOT)
@mock.patch('requests.get')
def test_attachment(self, mocked):
mocked.return_value.status_code = 200
mocked.return_value.content = ''
filename = os.path.join(os.path.dirname(__file__), 'data', 'organism.xls')
call_command('import', 'geotrek.common.tests.test_parsers.AttachmentParser', filename, verbosity=0)
organism = Organism.objects.get()
attachment = Attachment.objects.get()
self.assertEqual(attachment.content_object, organism)
self.assertEqual(attachment.attachment_file.name, 'paperclip/common_organism/{pk}/titi.png'.format(pk=organism.pk))
self.assertEqual(attachment.filetype, self.filetype)
@mock.patch('requests.get')
def test_attachment_not_updated(self, mocked):
mocked.return_value.status_code = 200
mocked.return_value.content = ''
filename = os.path.join(os.path.dirname(__file__), 'data', 'organism.xls')
call_command('import', 'geotrek.common.tests.test_parsers.AttachmentParser', filename, verbosity=0)
call_command('import', 'geotrek.common.tests.test_parsers.AttachmentParser', filename, verbosity=0)
self.assertEqual(mocked.call_count, 1)
self.assertEqual(Attachment.objects.count(), 1)
| johan--/Geotrek | geotrek/common/tests/test_parsers.py | Python | bsd-2-clause | 5,465 |
from hippiehug import RedisStore, Tree, Leaf, Branch
import pytest
## ============== TESTS ===================
def test_evidence():
t = Tree()
# Test positive case
t.add(b"Hello", b"Hello")
t.add(b"World", b"World")
root, E = t.evidence(b"World")
assert len(E) == 2
store = dict((e.identity(), e) for e in E)
t2 = Tree(store, root)
assert t2.is_in(b"World")
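# The rstore fixture used below is assumed to be provided by the test suite's
# conftest (a RedisStore-backed store, per the import above).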
def test_store(rstore):
l = Leaf(b"Hello", b"Hello")
rstore[l.identity()] = l
assert rstore[l.identity()].identity() == l.identity()
def test_store_tree(rstore):
t = Tree(store=rstore)
from os import urandom
for _ in range(100):
item = urandom(32)
t.add(item, item)
assert t.is_in(item)
assert not t.is_in(urandom(32))
def test_leaf_isin():
l = Leaf(b"Hello", b"Hello")
store = {l.identity() : l}
b = l.add(store, b"Woitemrld", b"Woitemrld")
assert l.is_in(store, b"Hello", b"Hello")
def test_leaf_isin_map():
l = Leaf(item=b"Hello", key=b"World")
store = {l.identity() : l}
b = l.add(store, b"World", b"World")
assert l.is_in(store, item=b"Hello", key=b"World")
def test_Branch_isin():
l = Leaf(b"Hello", b"Hello")
store = {l.identity() : l}
b = l.add(store, b"World", b"World")
assert b.is_in(store, b"Hello", b"Hello")
assert b.is_in(store, b"World", b"World")
def test_Branch_isin_map():
l = Leaf(item=b"Hello", key=b"A")
store = {l.identity() : l}
b = l.add(store, item=b"World", key=b"B")
assert b.is_in(store, b"Hello", b"A")
assert b.is_in(store, b"World", b"B")
assert not b.is_in(store, b"World", b"C")
def test_Branch_multi():
l = Leaf(b"Hello", b"Hello")
store = {l.identity() : l}
b = l.multi_add(store, [b"B", b"C"], [b"B", b"C"])
b.check(store)
assert b.is_in(store, b"B", b"B")
assert b.is_in(store, b"C", b"C")
assert b.is_in(store, b"Hello", b"Hello")
def test_Branch_add():
l = Leaf(b"Hello", b"Hello")
store = {l.identity() : l}
b = l.add(store, b"World", b"World")
b2 = b.add(store, b"Doom", b"Doom")
assert isinstance(b2, Branch)
assert b2.left_branch in store
assert b2.right_branch in store
assert b2.identity() in store
b2.check(store)
def test_add_like_a_monkey():
root = Leaf(b"Hello",b"Hello")
store = {root.identity() : root}
from os import urandom
for _ in range(100):
item = urandom(32)
root = root.add(store, item, item)
root.check(store)
assert root.is_in(store, item, item)
def test_Leaf_add():
l = Leaf(b"Hello", b"Hello")
store = {l.identity() : l}
b = l.add(store, b"World", b"World")
assert isinstance(b, Branch)
assert b.left_branch in store
assert b.right_branch in store
assert b.identity() in store
assert store[b.left_branch].item <= b.pivot
assert store[b.right_branch].item > b.pivot
def test_Tree():
t = Tree()
def test_add_isin():
t = Tree()
# Test positive case
t.add(b"Hello")
assert t.is_in(b"Hello") == True
# Infix operator
assert b"Hello" in t
def test_fail_isin():
t = Tree()
# Test negative case
assert t.is_in(b"World") == False
def test_massive():
t = Tree()
from os import urandom
for _ in range(100):
item = urandom(32)
t.add(item)
assert t.is_in(item)
assert not t.is_in(urandom(32))
def test_multi_add():
t = Tree()
from os import urandom
X = [urandom(32) for _ in range(100)]
t.multi_add(X)
for x in X:
assert x in t
X = [urandom(32) for _ in range(100)]
t.multi_add(X)
for x in X:
assert x in t
Y = [urandom(32) for _ in range(100)]
for y in Y:
assert y not in t
def test_multi_small():
t = Tree()
t.multi_add([b"Hello", b"World"])
assert b"Hello" in t
assert b"World" in t
t.multi_add([b"A", b"B", b"C", b"D", b"E", b"F"])
assert b"E" in t
assert b"F" in t
def test_multi_test():
t = Tree()
t.multi_add([b"Hello", b"World"])
assert t.multi_is_in([b"Hello", b"World"]) == [True, True]
answer, head, evidence = t.multi_is_in([b"Hello", b"World"], evidence=True)
assert answer == [True, True]
e = dict((k.identity(), k) for k in evidence)
t2 = Tree(e, head)
assert t2.multi_is_in([b"Hello", b"World"]) == [True, True]
def test_lookup():
l = Leaf(item=b"Hello", key=b"A")
store = {l.identity() : l}
b = l.add(store, item=b"World", key=b"B")
assert b.is_in(store, b"Hello", b"A")
assert b.is_in(store, b"World", b"B")
assert not b.is_in(store, b"World", b"C")
assert b.lookup(store, b"B") == (b"B", b"World")
try:
b.lookup(store, b"B") == (b"B", b"World2")
assert False
except:
assert True
try:
b.lookup(store, b"C") == (b"B", b"World2")
assert False
except:
assert True
def test_double_add():
l = Leaf(item=b"Hello", key=b"A")
store = {l.identity() : l}
b = l.add(store, item=b"World", key=b"B")
assert b.is_in(store, b"Hello", b"A")
assert b.is_in(store, b"World", b"B")
assert not b.is_in(store, b"World", b"C")
b = b.add(store, item=b"World2", key=b"B")
assert b.lookup(store, b"B") == (b"B", b"World")
assert not b.lookup(store, b"B") == (b"B", b"World2")
def test_tree_default_store():
t = Tree()
t.multi_add([b"test"])
assert t.is_in(b"test")
t2 = Tree()
assert not t2.is_in(b"test")
def test_tree_empty_store():
store = {}
t = Tree(store)
t.multi_add([b"test"])
assert t.is_in(b"test")
t2 = Tree(store, root_hash=t.root())
assert t2.is_in(b"test")
| gdanezis/rousseau-chain | hippiehug-package/tests/test_tree.py | Python | bsd-2-clause | 5,884 |
from flask import *
from pyZPL import *
from printLabel import printLabel
import xml.etree.ElementTree as ET
import os
app = Flask(__name__)
dn = os.path.dirname(os.path.realpath(__file__))+"/"
tree = ET.parse(dn+"pace.xml")
customElements = tree.findall(".//*[@id]")
customItems = []
for element in customElements:
newItem = ZPLCustomItem()
newItem.ID = element.get("id")
newItem.data = element.text
newItem.type = element.tag
if element.get("fixed"):
newItem.fixed = "readonly"
customItems.append(newItem)
@app.route('/')
def root():
return render_template("index.html",items=customItems)
@app.route('/print', methods=['POST'])
def print_():
customItemsModified = []
if request.method == 'POST':
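# form fields come in pairs named "<id>_string" (the text value) and
# "<id>_bool" (the visibility checkbox); rebuild the custom items from them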
for key,value in request.form.iteritems():
newItem = ZPLCustomItem()
split = key.split('_')
newItem.type = split[len(split)-1]
newItem.ID = str.join("_",split[:len(split)-1])
newItem.data = request.form[newItem.ID+"_string"]
try:
request.form[newItem.ID+"_bool"]
newItem.visible = True
except KeyError:
newItem.visible = False
customItemsModified.append(newItem)
return printLabel(customItemsModified)
else:
return "can has post?"
if __name__ == '__main__':
app.debug = True
app.run()
| OHRI-BioInfo/pyZPL | web.py | Python | bsd-2-clause | 1,418 |
# coding: utf-8
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('logger', '0011_add-index-to-instance-uuid_and_xform_uuid'),
]
operations = [
migrations.AddField(
model_name='xform',
name='kpi_asset_uid',
field=models.CharField(max_length=32, null=True),
),
]
| kobotoolbox/kobocat | onadata/apps/logger/migrations/0012_add_asset_uid_to_xform.py | Python | bsd-2-clause | 390 |
import copy
import mufsim.utils as util
import mufsim.gamedb as db
import mufsim.stackitems as si
from mufsim.errors import MufRuntimeError
from mufsim.insts.base import Instruction, instr
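# The @instr("name") decorator registers each class below as a MUF primitive;
# its execute() method pops operands from and pushes results onto the frame's
# data stack.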
class InstPushItem(Instruction):
value = 0
def __init__(self, line, val):
self.value = val
super(InstPushItem, self).__init__(line)
def execute(self, fr):
fr.data_push(self.value)
def __str__(self):
return si.item_repr(self.value)
class InstGlobalVar(Instruction):
varnum = 0
varname = 0
def __init__(self, line, vnum, vname):
self.varnum = vnum
self.varname = vname
super(InstGlobalVar, self).__init__(line)
def execute(self, fr):
fr.data_push(si.GlobalVar(self.varnum))
def __str__(self):
return "LV%d: %s" % (self.varnum, self.varname)
class InstFuncVar(Instruction):
varnum = 0
varname = 0
def __init__(self, line, vnum, vname):
self.varnum = vnum
self.varname = vname
super(InstFuncVar, self).__init__(line)
def execute(self, fr):
fr.data_push(si.FuncVar(self.varnum))
def __str__(self):
return "SV%d: %s" % (self.varnum, self.varname)
@instr("secure_sysvars")
class InstSecureSysvars(Instruction):
def execute(self, fr):
fr.globalvar_set(0, fr.user)
fr.globalvar_set(1, si.DBRef(db.getobj(fr.user).location))
fr.globalvar_set(2, fr.trigger)
fr.globalvar_set(3, fr.command)
@instr("!")
class InstBang(Instruction):
def execute(self, fr):
fr.check_underflow(2)
v = fr.data_pop(si.GlobalVar, si.FuncVar)
val = fr.data_pop()
if isinstance(v, si.GlobalVar):
fr.globalvar_set(v.value, val)
elif isinstance(v, si.FuncVar):
fr.funcvar_set(v.value, val)
def __str__(self):
return "!"
@instr("@")
class InstAt(Instruction):
def execute(self, fr):
v = fr.data_pop(si.GlobalVar, si.FuncVar)
if isinstance(v, si.GlobalVar):
val = fr.globalvar_get(v.value)
fr.data_push(val)
elif isinstance(v, si.FuncVar):
val = fr.funcvar_get(v.value)
fr.data_push(val)
def __str__(self):
return "@"
@instr("dup")
class InstDup(Instruction):
def execute(self, fr):
a = fr.data_pop()
fr.data_push(a)
fr.data_push(a)
@instr("shallow_copy")
class InstShallowCopy(Instruction):
def execute(self, fr):
a = fr.data_pop()
fr.data_push(a)
fr.data_push(copy.copy(a))
@instr("deep_copy")
class InstDeepCopy(Instruction):
def execute(self, fr):
a = fr.data_pop()
fr.data_push(a)
fr.data_push(copy.deepcopy(a))
@instr("?dup")
class InstQDup(Instruction):
def execute(self, fr):
a = fr.data_pop()
if isinstance(a, si.DBRef):
if a.value != -1:
fr.data_push(a)
elif a:
fr.data_push(a)
fr.data_push(a)
@instr("dupn")
class InstDupN(Instruction):
def execute(self, fr):
n = fr.data_pop(int)
fr.check_underflow(n)
for i in range(n):
fr.data_push(fr.data_pick(n))
@instr("ldup")
class InstLDup(Instruction):
def execute(self, fr):
n = fr.data_pick(1)
if not isinstance(n, int):
raise MufRuntimeError("Expected integer argument.")
n += 1
fr.check_underflow(n)
for i in range(n):
fr.data_push(fr.data_pick(n))
@instr("pop")
class InstPop(Instruction):
def execute(self, fr):
fr.data_pop()
@instr("popn")
class InstPopN(Instruction):
def execute(self, fr):
n = fr.data_pop(int)
fr.check_underflow(n)
for i in range(n):
fr.data_pop()
@instr("swap")
class InstSwap(Instruction):
def execute(self, fr):
fr.check_underflow(2)
b = fr.data_pop()
a = fr.data_pop()
fr.data_push(b)
fr.data_push(a)
@instr("rot")
class InstRot(Instruction):
def execute(self, fr):
fr.check_underflow(3)
a = fr.data_pull(3)
fr.data_push(a)
@instr("-rot")
class InstNegRot(Instruction):
def execute(self, fr):
fr.check_underflow(3)
c = fr.data_pop()
b = fr.data_pop()
a = fr.data_pop()
fr.data_push(c)
fr.data_push(a)
fr.data_push(b)
@instr("rotate")
class InstRotate(Instruction):
def execute(self, fr):
num = fr.data_pop(int)
fr.check_underflow(num)
if not num:
return
if num < 0:
a = fr.data_pop()
fr.data_insert((-num) - 1, a)
elif num > 0:
a = fr.data_pull(num)
fr.data_push(a)
@instr("pick")
class InstPick(Instruction):
def execute(self, fr):
num = fr.data_pop(int)
fr.check_underflow(num)
if not num:
return
if num < 0:
raise MufRuntimeError("Expected positive integer.")
else:
a = fr.data_pick(num)
fr.data_push(a)
@instr("over")
class InstOver(Instruction):
def execute(self, fr):
fr.check_underflow(2)
a = fr.data_pick(2)
fr.data_push(a)
@instr("put")
class InstPut(Instruction):
def execute(self, fr):
fr.check_underflow(2)
num = fr.data_pop(int)
val = fr.data_pop()
fr.check_underflow(num)
if not num:
return
if num < 0:
raise MufRuntimeError("Value out of range")
else:
fr.data_put(num, val)
@instr("nip")
class InstNip(Instruction):
def execute(self, fr):
fr.check_underflow(2)  # nip only needs two items on the stack
b = fr.data_pop()
a = fr.data_pop()
fr.data_push(b)
@instr("tuck")
class InstTuck(Instruction):
def execute(self, fr):
fr.check_underflow(2)  # tuck only needs two items on the stack
b = fr.data_pop()
a = fr.data_pop()
fr.data_push(b)
fr.data_push(a)
fr.data_push(b)
@instr("reverse")
class InstReverse(Instruction):
def execute(self, fr):
num = fr.data_pop(int)
fr.check_underflow(num)
if not num:
return
arr = [fr.data_pop() for i in range(num)]
for val in arr:
fr.data_push(val)
@instr("lreverse")
class InstLReverse(Instruction):
def execute(self, fr):
num = fr.data_pop(int)
fr.check_underflow(num)
if not num:
return
arr = [fr.data_pop() for i in range(num)]
for val in arr:
fr.data_push(val)
fr.data_push(num)
@instr("{")
class InstMark(Instruction):
def execute(self, fr):
fr.data_push(si.Mark())
@instr("}")
class InstMarkCount(Instruction):
def execute(self, fr):
for i in range(fr.data_depth()):
a = fr.data_pick(i + 1)
if isinstance(a, si.Mark):
fr.data_pull(i + 1)
fr.data_push(i)
return
raise MufRuntimeError("StackUnderflow")
@instr("depth")
class InstDepth(Instruction):
def execute(self, fr):
fr.data_push(fr.data_depth())
@instr("fulldepth")
class InstFullDepth(Instruction):
def execute(self, fr):
fr.data_push(fr.data_full_depth())
@instr("variable")
class InstVariable(Instruction):
def execute(self, fr):
vnum = fr.data_pop(int)
fr.data_push(si.GlobalVar(vnum))
@instr("localvar")
class InstLocalVar(Instruction):
def execute(self, fr):
vnum = fr.data_pop(int)
fr.data_push(si.GlobalVar(vnum))
@instr("caller")
class InstCaller(Instruction):
def execute(self, fr):
fr.data_push(fr.caller_get())
@instr("prog")
class InstProg(Instruction):
def execute(self, fr):
fr.data_push(fr.program)
@instr("trig")
class InstTrig(Instruction):
def execute(self, fr):
fr.data_push(fr.trigger)
@instr("cmd")
class InstCmd(Instruction):
def execute(self, fr):
fr.data_push(fr.command)
@instr("checkargs")
class InstCheckArgs(Instruction):
itemtypes = {
'a': ([si.Address], "address"),
'd': ([si.DBRef], "dbref"),
'D': ([si.DBRef], "valid object dbref"),
'e': ([si.DBRef], "exit dbref"),
'E': ([si.DBRef], "valid exit dbref"),
'f': ([si.DBRef], "program dbref"),
'F': ([si.DBRef], "valid program dbref"),
'i': ([int], "integer"),
'l': ([si.Lock], "lock"),
'p': ([si.DBRef], "player dbref"),
'P': ([si.DBRef], "valid player dbref"),
'r': ([si.DBRef], "room dbref"),
'R': ([si.DBRef], "valid room dbref"),
's': ([str], "string"),
'S': ([str], "non-null string"),
't': ([si.DBRef], "thing dbref"),
'T': ([si.DBRef], "valid thing dbref"),
'v': ([si.GlobalVar, si.FuncVar], "variable"),
'?': ([], "any"),
}
objtypes = {
'D': "",
'P': "player",
'R': "room",
'T': "thing",
'E': "exit",
'F': "program",
}
def checkargs_part(self, fr, fmt, depth=1):
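# Parses the checkargs format string right to left: digits build a repeat
# count for the following specifier, single letters are checked against the
# expected stack item types in itemtypes, and a "{ ... }" group is repeated
# a number of times read from the stack itself.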
count = ""
pos = len(fmt) - 1
while pos >= 0:
ch = fmt[pos]
pos -= 1
if ch == " ":
continue
elif util.is_int(ch):
count = ch + count
continue
elif ch == "}":
newpos = pos
cnt = 1 if not count else int(count)
for i in range(cnt):
val = fr.data_pick(depth)
depth += 1
fr.check_type(val, [int])
for j in range(val):
newpos, depth = self.checkargs_part(
fr, fmt[:pos + 1], depth)
pos = newpos
count = ""
elif ch == "{":
return (pos, depth)
elif ch in self.itemtypes:
cnt = 1 if not count else int(count)
count = ""
for i in range(cnt):
val = fr.data_pick(depth)
depth += 1
types, label = self.itemtypes[ch]
fr.check_type(val, types)
if ch == "S" and val == "":
raise MufRuntimeError(
"Expected %s at depth %d" % (label, depth))
if si.DBRef in types:
typ = self.objtypes[ch.upper()]
if (
not db.validobj(val) and
ch.isupper()
) or (
db.validobj(val) and typ and
db.getobj(val).objtype != typ
):
raise MufRuntimeError(
"Expected %s at depth %d" % (label, depth))
def execute(self, fr):
argexp = fr.data_pop(str)
self.checkargs_part(fr, argexp)
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
| revarbat/mufsim | mufsim/insts/stack.py | Python | bsd-2-clause | 11,103 |
#!/usr/bin/env python
# encoding: utf-8
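# Note: Python 2 style code -- the iterator protocol method is next() and
# print is used as a statement; under Python 3 these would be __next__() and print().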
class MyRange(object):
def __init__(self, n):
self.idx = 0
self.n = n
def __iter__(self):
return self
def next(self):
if self.idx < self.n:
val = self.idx
self.idx += 1
return val
else:
raise StopIteration()
myRange = MyRange(3)
for i in myRange:
print i
| feixiao5566/Py_Rabbic | IO/自定义迭代器.py | Python | bsd-2-clause | 402 |
# Homework 2 solution, part 1: cnf.py
# Andrew Gordon
# Feb 18, 2015
# Revised June 19, 2015 for better input/output and implies->if
import sys
import fileinput
def biconditionalElimination(s):
if type(s) is str:
return s
elif type(s) is list and s[0] == "iff":
return(["and",
["if",
biconditionalElimination(s[1]),
biconditionalElimination(s[2])],
["if",
biconditionalElimination(s[2]),
biconditionalElimination(s[1])]])
else:
return([s[0]] + [biconditionalElimination(i) for i in s[1:]])
def implicationElimination(s):
if type(s) is str:
return s
elif type(s) is list and s[0] == "if":
return(["or",
["not",
implicationElimination(s[1])],
implicationElimination(s[2])])
else:
return([s[0]] + [implicationElimination(i) for i in s[1:]])
def doubleNegationElimination(s):
if type(s) is str:
return s
elif type(s) is list and s[0] == "not" and type(s[1]) is list and s[1][0] == "not":
return(doubleNegationElimination(s[1][1]))
else:
return([s[0]] + [doubleNegationElimination(i) for i in s[1:]])
def demorgan(s):
revision = demorgan1(s)
if revision == s:
return s
else:
return demorgan(revision)
def demorgan1(s):
if type(s) is str:
return s
elif type(s) is list and s[0] == "not" and type(s[1]) is list and s[1][0] == "and":
return(["or"] + [demorgan(["not", i]) for i in s[1][1:]])
elif type(s) is list and s[0] == "not" and type(s[1]) is list and s[1][0] == "or":
return(["and"] + [demorgan(["not", i]) for i in s[1][1:]])
else:
return ([s[0]] + [demorgan(i) for i in s[1:]])
def binaryize(s): # ensures all connectives are binary (and / or)
if type(s) is str:
return s
elif type(s) is list and s[0] == "and" and len(s) > 3: # too long
return(["and", s[1], binaryize(["and"] + s[2:])])
elif type(s) is list and s[0] == "or" and len(s) > 3: # too long
return(["or", s[1], binaryize(["or"] + s[2:])])
else:
return([s[0]] + [binaryize(i) for i in s[1:]])
def distributivity(s):
revision = distributivity1(s)
if revision == s:
return s
else:
return distributivity(revision)
def distributivity1(s): # only works on binary connectives
if type(s) is str:
return s
elif type(s) is list and s[0] == "or" and type(s[1]) is list and s[1][0] == "and":
# distribute s[2] over s[1]
return(["and"] + [distributivity(["or", i, s[2]]) for i in s[1][1:]])
elif type(s) is list and s[0] == "or" and type(s[2]) is list and s[2][0] == "and":
# distribute s[1] over s[2]
return(["and"] + [distributivity(["or", i, s[1]]) for i in s[2][1:]])
else:
return ([s[0]] + [distributivity(i) for i in s[1:]])
def andAssociativity(s):
revision = andAssociativity1(s)
if revision == s:
return s
else:
return andAssociativity(revision)
def andAssociativity1(s):
if type(s) is str:
return s
elif type(s) is list and s[0] == "and":
result = ["and"]
# iterate through conjuncts looking for "and" lists
for i in s[1:]:
if type(i) is list and i[0] == "and":
result = result + i[1:]
else:
result.append(i)
return result
else:
return([s[0]] + [andAssociativity1(i) for i in s[1:]])
def orAssociativity(s):
revision = orAssociativity1(s)
if revision == s:
return s
else:
return orAssociativity(revision)
def orAssociativity1(s):
if type(s) is str:
return s
elif type(s) is list and s[0] == "or":
result = ["or"]
# iterate through disjuncts looking for "or" lists
for i in s[1:]:
if type(i) is list and i[0] == "or":
result = result + i[1:]
else:
result.append(i)
return result
else:
return([s[0]] + [orAssociativity1(i) for i in s[1:]])
def removeDuplicateLiterals(s):
if type(s) is str:
return s
if s[0] == "not":
return s
if s[0] == "and":
return(["and"] + [removeDuplicateLiterals(i) for i in s[1:]])
if s[0] == "or":
remains = []
for l in s[1:]:
if l not in remains:
remains.append(l)
if len(remains) == 1:
return remains[0]
else:
return(["or"] + remains)
def removeDuplicateClauses(s):
if type(s) is str:
return s
if s[0] == "not":
return s
if s[0] == "or":
return s
if s[0] == "and": #conjunction of clauses
remains = []
for c in s[1:]:
if unique(c, remains):
remains.append(c)
if len(remains) == 1:
return remains[0]
else:
return(["and"] + remains)
def unique(c, remains):
for p in remains:
if type(c) is str or type(p) is str:
if c == p:
return False
elif len(c) == len(p):
if len([i for i in c[1:] if i not in p[1:]]) == 0:
return False
return True
def cnf(s):
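# Convert a propositional sentence, given as nested lists over the connectives
# "iff", "if", "and", "or" and "not", to conjunctive normal form by applying
# the rewrite steps below in order.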
s = biconditionalElimination(s)
s = implicationElimination(s)
s = demorgan(s)
s = doubleNegationElimination(s)
s = binaryize(s)
s = distributivity(s)
s = andAssociativity(s)
s = orAssociativity(s)
s = removeDuplicateLiterals(s)
s = removeDuplicateClauses(s)
return s
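# For illustration:
# cnf(["iff", "P", "Q"])
# => ["and", ["or", ["not", "P"], "Q"], ["or", ["not", "Q"], "P"]]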
if __name__ == "__main__":
sentences = fileinput.input()
for l in sentences:
print repr(cnf(eval(l.strip())))
| asgordon/DPLL | cnf.py | Python | bsd-2-clause | 5,803 |
from mpi4py import MPI
from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.implementations.controller_classes.controller_MPI import controller_MPI
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.problem_classes.HeatEquation_1D_FD import heat1d
from pySDC.implementations.sweeper_classes.generic_LU import generic_implicit
from pySDC.implementations.transfer_classes.TransferMesh import mesh_to_mesh
def set_parameters_ml():
"""
Helper routine to set parameters for the following multi-level runs
Returns:
dict: dictionary containing the simulation parameters
dict: dictionary containing the controller parameters
float: starting time
float: end time
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 5E-10
level_params['dt'] = 0.125
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = CollGaussRadau_Right
sweeper_params['QI'] = 'LU'
sweeper_params['num_nodes'] = [3]
# initialize problem parameters
problem_params = dict()
problem_params['nu'] = 0.1 # diffusion coefficient
problem_params['freq'] = 2 # frequency for the test value
problem_params['nvars'] = [63, 31] # number of degrees of freedom for each level
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
step_params['errtol'] = 1E-05
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['rorder'] = 2
space_transfer_params['iorder'] = 6
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['all_to_done'] = True # can ask the controller to keep iterating all steps until the end
controller_params['use_iteration_estimator'] = False # activate iteration estimator
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = heat1d # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_implicit # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
description['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer
# set time parameters
t0 = 0.0
Tend = 1.0
return description, controller_params, t0, Tend
if __name__ == "__main__":
"""
A simple test program to do MPI-parallel PFASST runs
"""
# set MPI communicator
comm = MPI.COMM_WORLD
# get parameters from Part A
description, controller_params, t0, Tend = set_parameters_ml()
# instantiate controllers
controller = controller_MPI(controller_params=controller_params, description=description, comm=comm)
# get initial values on finest level
P = controller.S.levels[0].prob
uinit = P.u_exact(t0)
# call main functions to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by type (number of iterations)
filtered_stats = filter_stats(stats, type='niter')
# convert filtered statistics to list of iterations count, sorted by process
iter_counts = sort_stats(filtered_stats, sortby='time')
# gather the per-process lists of statistics on rank 0
iter_counts_list = comm.gather(iter_counts, root=0)
rank = comm.Get_rank()
size = comm.Get_size()
if rank == 0:
out = 'Working with %2i processes...' % size
print(out)
# compute exact solutions and compare with both results
uex = P.u_exact(Tend)
err = abs(uex - uend)
out = 'Error vs. exact solution: %12.8e' % err
print(out)
# build one list of statistics instead of a list of lists, then sort by time
iter_counts_gather = [item for sublist in iter_counts_list for item in sublist]
iter_counts = sorted(iter_counts_gather, key=lambda tup: tup[0])
# compute and print statistics
for item in iter_counts:
out = 'Number of iterations for time %4.2f: %1i ' % (item[0], item[1])
print(out)
| Parallel-in-Time/pySDC | pySDC/playgrounds/compression/run_parallel_Heat_NumPy.py | Python | bsd-2-clause | 4,544 |
from django.test import TestCase
from addressbase.models import Address
from addressbase.tests.factories import AddressFactory, UprnToCouncilFactory
class TestAddressFactory(TestCase):
def test_address_factory(self):
address = AddressFactory()
self.assertEqual(len(address.uprn), 9)
self.assertEqual(address.addressbase_postal, "D")
class TestUprnToCouncilFactory(TestCase):
def test_uprn_to_council_factory(self):
uprn_to_council = UprnToCouncilFactory()
self.assertIsInstance(uprn_to_council.uprn, Address)
| DemocracyClub/UK-Polling-Stations | polling_stations/apps/addressbase/tests/test_factories.py | Python | bsd-3-clause | 562 |
""" Test functions for stats module
"""
import warnings
import re
import sys
import pickle
import os
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_warns,
assert_array_less, suppress_warnings)
import pytest
from pytest import raises as assert_raises
import numpy
import numpy as np
from numpy import typecodes, array
from numpy.lib.recfunctions import rec_append_fields
from scipy import special
from scipy._lib._util import check_random_state
from scipy.integrate import IntegrationWarning
import scipy.stats as stats
from scipy.stats._distn_infrastructure import argsreduce
import scipy.stats.distributions
from scipy.special import xlogy
from .test_continuous_basic import distcont
# python -OO strips docstrings
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
def _assert_hasattr(a, b, msg=None):
if msg is None:
msg = '%s does not have attribute %s' % (a, b)
assert_(hasattr(a, b), msg=msg)
def test_api_regression():
# https://github.com/scipy/scipy/issues/3802
_assert_hasattr(scipy.stats.distributions, 'f_gen')
def check_vonmises_pdf_periodic(k, l, s, x):
vm = stats.vonmises(k, loc=l, scale=s)
assert_almost_equal(vm.pdf(x), vm.pdf(x % (2*numpy.pi*s)))
def check_vonmises_cdf_periodic(k, l, s, x):
vm = stats.vonmises(k, loc=l, scale=s)
assert_almost_equal(vm.cdf(x) % 1, vm.cdf(x % (2*numpy.pi*s)) % 1)
def test_vonmises_pdf_periodic():
for k in [0.1, 1, 101]:
for x in [0, 1, numpy.pi, 10, 100]:
check_vonmises_pdf_periodic(k, 0, 1, x)
check_vonmises_pdf_periodic(k, 1, 1, x)
check_vonmises_pdf_periodic(k, 0, 10, x)
check_vonmises_cdf_periodic(k, 0, 1, x)
check_vonmises_cdf_periodic(k, 1, 1, x)
check_vonmises_cdf_periodic(k, 0, 10, x)
def test_vonmises_line_support():
assert_equal(stats.vonmises_line.a, -np.pi)
assert_equal(stats.vonmises_line.b, np.pi)
def test_vonmises_numerical():
vm = stats.vonmises(800)
assert_almost_equal(vm.cdf(0), 0.5)
@pytest.mark.parametrize('dist',
['alpha', 'betaprime',
'fatiguelife', 'invgamma', 'invgauss', 'invweibull',
'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gilbrat',
'powerlognorm', 'rayleigh', 'wald'])
def test_support(dist):
"""gh-6235"""
dct = dict(distcont)
args = dct[dist]
dist = getattr(stats, dist)
assert_almost_equal(dist.pdf(dist.a, *args), 0)
assert_equal(dist.logpdf(dist.a, *args), -np.inf)
assert_almost_equal(dist.pdf(dist.b, *args), 0)
assert_equal(dist.logpdf(dist.b, *args), -np.inf)
class TestRandInt(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.randint.rvs(5, 30, size=100)
assert_(numpy.all(vals < 30) & numpy.all(vals >= 5))
assert_(len(vals) == 100)
vals = stats.randint.rvs(5, 30, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.randint.rvs(15, 46)
assert_((val >= 15) & (val < 46))
assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val)))
val = stats.randint(15, 46).rvs(3)
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pdf(self):
k = numpy.r_[0:36]
out = numpy.where((k >= 5) & (k < 30), 1.0/(30-5), 0)
vals = stats.randint.pmf(k, 5, 30)
assert_array_almost_equal(vals, out)
def test_cdf(self):
x = np.linspace(0, 36, 100)
k = numpy.floor(x)
out = numpy.select([k >= 30, k >= 5], [1.0, (k-5.0+1)/(30-5.0)], 0)
vals = stats.randint.cdf(x, 5, 30)
assert_array_almost_equal(vals, out, decimal=12)
class TestBinom(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.binom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.binom.rvs(10, 0.75)
assert_(isinstance(val, int))
val = stats.binom(10, 0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
# regression test for Ticket #1842
vals1 = stats.binom.pmf(100, 100, 1)
vals2 = stats.binom.pmf(0, 100, 0)
assert_allclose(vals1, 1.0, rtol=1e-15, atol=0)
assert_allclose(vals2, 1.0, rtol=1e-15, atol=0)
def test_entropy(self):
# Basic entropy tests.
b = stats.binom(2, 0.5)
expected_p = np.array([0.25, 0.5, 0.25])
expected_h = -sum(xlogy(expected_p, expected_p))
h = b.entropy()
assert_allclose(h, expected_h)
b = stats.binom(2, 0.0)
h = b.entropy()
assert_equal(h, 0.0)
b = stats.binom(2, 1.0)
h = b.entropy()
assert_equal(h, 0.0)
def test_warns_p0(self):
# no spurious warnings are generated for p=0; gh-3817
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
assert_equal(stats.binom(n=2, p=0).mean(), 0)
assert_equal(stats.binom(n=2, p=0).std(), 0)
class TestBernoulli(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.bernoulli.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.bernoulli.rvs(0.75)
assert_(isinstance(val, int))
val = stats.bernoulli(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_entropy(self):
# Simple tests of entropy.
b = stats.bernoulli(0.25)
expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75)
h = b.entropy()
assert_allclose(h, expected_h)
b = stats.bernoulli(0.0)
h = b.entropy()
assert_equal(h, 0.0)
b = stats.bernoulli(1.0)
h = b.entropy()
assert_equal(h, 0.0)
class TestBradford(object):
# gh-6216
def test_cdf_ppf(self):
c = 0.1
x = np.logspace(-20, -4)
q = stats.bradford.cdf(x, c)
xx = stats.bradford.ppf(q, c)
assert_allclose(x, xx)
class TestNBinom(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.nbinom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.nbinom.rvs(10, 0.75)
assert_(isinstance(val, int))
val = stats.nbinom(10, 0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
# regression test for ticket 1779
assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
stats.nbinom.pmf(700, 721, 0.52))
# logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)
val = scipy.stats.nbinom.logpmf(0, 1, 1)
assert_equal(val, 0)
class TestGenInvGauss(object):
def setup_method(self):
np.random.seed(1234)
@pytest.mark.slow
def test_rvs_with_mode_shift(self):
# ratio_unif w/ mode shift
gig = stats.geninvgauss(2.3, 1.5)
_, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
assert_equal(p > 0.05, True)
@pytest.mark.slow
def test_rvs_without_mode_shift(self):
# ratio_unif w/o mode shift
gig = stats.geninvgauss(0.9, 0.75)
_, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
assert_equal(p > 0.05, True)
@pytest.mark.slow
def test_rvs_new_method(self):
# new algorithm of Hoermann / Leydold
gig = stats.geninvgauss(0.1, 0.2)
_, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
assert_equal(p > 0.05, True)
@pytest.mark.slow
def test_rvs_p_zero(self):
def my_ks_check(p, b):
gig = stats.geninvgauss(p, b)
rvs = gig.rvs(size=1500, random_state=1234)
return stats.kstest(rvs, gig.cdf)[1] > 0.05
# boundary cases when p = 0
assert_equal(my_ks_check(0, 0.2), True) # new algo
assert_equal(my_ks_check(0, 0.9), True) # ratio_unif w/o shift
assert_equal(my_ks_check(0, 1.5), True) # ratio_unif with shift
def test_rvs_negative_p(self):
# if p negative, return inverse
assert_equal(
stats.geninvgauss(-1.5, 2).rvs(size=10, random_state=1234),
1 / stats.geninvgauss(1.5, 2).rvs(size=10, random_state=1234))
def test_invgauss(self):
# test that invgauss is special case
ig = stats.geninvgauss.rvs(size=1500, p=-0.5, b=1, random_state=1234)
assert_equal(stats.kstest(ig, 'invgauss', args=[1])[1] > 0.15, True)
# test pdf and cdf
mu, x = 100, np.linspace(0.01, 1, 10)
pdf_ig = stats.geninvgauss.pdf(x, p=-0.5, b=1 / mu, scale=mu)
assert_allclose(pdf_ig, stats.invgauss(mu).pdf(x))
cdf_ig = stats.geninvgauss.cdf(x, p=-0.5, b=1 / mu, scale=mu)
assert_allclose(cdf_ig, stats.invgauss(mu).cdf(x))
def test_pdf_R(self):
# test against R package GIGrvg
# x <- seq(0.01, 5, length.out = 10)
# GIGrvg::dgig(x, 0.5, 1, 1)
vals_R = np.array([2.081176820e-21, 4.488660034e-01, 3.747774338e-01,
2.693297528e-01, 1.905637275e-01, 1.351476913e-01,
9.636538981e-02, 6.909040154e-02, 4.978006801e-02,
3.602084467e-02])
x = np.linspace(0.01, 5, 10)
assert_allclose(vals_R, stats.geninvgauss.pdf(x, 0.5, 1))
def test_pdf_zero(self):
# pdf at 0 is 0, needs special treatment to avoid 1/x in pdf
assert_equal(stats.geninvgauss.pdf(0, 0.5, 0.5), 0)
# if x is large and p is moderate, make sure that pdf does not
# overflow because of x**(p-1); exp(-b*x) forces pdf to zero
assert_equal(stats.geninvgauss.pdf(2e6, 50, 2), 0)
class TestNormInvGauss(object):
def setup_method(self):
np.random.seed(1234)
def test_cdf_R(self):
# test pdf and cdf vals against R
# require("GeneralizedHyperbolic")
# x_test <- c(-7, -5, 0, 8, 15)
# r_cdf <- GeneralizedHyperbolic::pnig(x_test, mu = 0, a = 1, b = 0.5)
# r_pdf <- GeneralizedHyperbolic::dnig(x_test, mu = 0, a = 1, b = 0.5)
r_cdf = np.array([8.034920282e-07, 2.512671945e-05, 3.186661051e-01,
9.988650664e-01, 9.999848769e-01])
x_test = np.array([-7, -5, 0, 8, 15])
vals_cdf = stats.norminvgauss.cdf(x_test, a=1, b=0.5)
assert_allclose(vals_cdf, r_cdf, atol=1e-9)
def test_pdf_R(self):
# values from R as defined in test_cdf_R
r_pdf = np.array([1.359600783e-06, 4.413878805e-05, 4.555014266e-01,
7.450485342e-04, 8.917889931e-06])
x_test = np.array([-7, -5, 0, 8, 15])
vals_pdf = stats.norminvgauss.pdf(x_test, a=1, b=0.5)
assert_allclose(vals_pdf, r_pdf, atol=1e-9)
def test_stats(self):
a, b = 1, 0.5
gamma = np.sqrt(a**2 - b**2)
v_stats = (b / gamma, a**2 / gamma**3, 3.0 * b / (a * np.sqrt(gamma)),
3.0 * (1 + 4 * b**2 / a**2) / gamma)
assert_equal(v_stats, stats.norminvgauss.stats(a, b, moments='mvsk'))
def test_ppf(self):
a, b = 1, 0.5
x_test = np.array([0.001, 0.5, 0.999])
vals = stats.norminvgauss.ppf(x_test, a, b)
assert_allclose(x_test, stats.norminvgauss.cdf(vals, a, b))
class TestGeom(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.geom.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.geom.rvs(0.75)
assert_(isinstance(val, int))
val = stats.geom(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
vals = stats.geom.pmf([1, 2, 3], 0.5)
assert_array_almost_equal(vals, [0.5, 0.25, 0.125])
def test_logpmf(self):
# regression test for ticket 1793
vals1 = np.log(stats.geom.pmf([1, 2, 3], 0.5))
vals2 = stats.geom.logpmf([1, 2, 3], 0.5)
assert_allclose(vals1, vals2, rtol=1e-15, atol=0)
# regression test for gh-4028
val = stats.geom.logpmf(1, 1)
assert_equal(val, 0.0)
def test_cdf_sf(self):
vals = stats.geom.cdf([1, 2, 3], 0.5)
vals_sf = stats.geom.sf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, expected)
assert_array_almost_equal(vals_sf, 1-expected)
def test_logcdf_logsf(self):
vals = stats.geom.logcdf([1, 2, 3], 0.5)
vals_sf = stats.geom.logsf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, np.log(expected))
assert_array_almost_equal(vals_sf, np.log1p(-expected))
def test_ppf(self):
vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)
expected = array([1.0, 2.0, 3.0])
assert_array_almost_equal(vals, expected)
def test_ppf_underflow(self):
# this should not underflow
assert_allclose(stats.geom.ppf(1e-20, 1e-20), 1.0, atol=1e-14)
class TestPlanck(object):
def setup_method(self):
np.random.seed(1234)
def test_sf(self):
vals = stats.planck.sf([1, 2, 3], 5.)
expected = array([4.5399929762484854e-05,
3.0590232050182579e-07,
2.0611536224385579e-09])
assert_array_almost_equal(vals, expected)
def test_logsf(self):
vals = stats.planck.logsf([1000., 2000., 3000.], 1000.)
expected = array([-1001000., -2001000., -3001000.])
assert_array_almost_equal(vals, expected)
class TestGennorm(object):
def test_laplace(self):
# test against Laplace (special case for beta=1)
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 1)
pdf2 = stats.laplace.pdf(points)
assert_almost_equal(pdf1, pdf2)
def test_norm(self):
# test against normal (special case for beta=2)
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 2)
pdf2 = stats.norm.pdf(points, scale=2**-.5)
assert_almost_equal(pdf1, pdf2)
class TestHalfgennorm(object):
def test_expon(self):
# test against exponential (special case for beta=1)
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, 1)
pdf2 = stats.expon.pdf(points)
assert_almost_equal(pdf1, pdf2)
def test_halfnorm(self):
# test against half normal (special case for beta=2)
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, 2)
pdf2 = stats.halfnorm.pdf(points, scale=2**-.5)
assert_almost_equal(pdf1, pdf2)
def test_gennorm(self):
# test against generalized normal
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, .497324)
pdf2 = stats.gennorm.pdf(points, .497324)
assert_almost_equal(pdf1, 2*pdf2)
class TestTruncnorm(object):
def setup_method(self):
np.random.seed(1234)
def test_ppf_ticket1131(self):
vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
assert_array_almost_equal(vals, expected)
def test_isf_ticket1131(self):
vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
assert_array_almost_equal(vals, expected)
def test_gh_2477_small_values(self):
# Check a case that worked in the original issue.
low, high = -11, -10
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
# Check a case that failed in the original issue.
low, high = 10, 11
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
# @pytest.mark.xfail(reason="truncnorm rvs is known to fail at extreme tails")
def test_gh_2477_large_values(self):
# Check a case that used to fail because of extreme tailness.
low, high = 100, 101
with np.errstate(divide='ignore'):
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
print(low, x.min(), x.max(), high)
assert_(low <= x.min() <= x.max() <= high), str([low, high, x])
# Check some additional extreme tails
low, high = 1000, 1001
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
low, high = 10000, 10001
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_9403_nontail_values(self):
for low, high in [[3, 4], [-4, -3]]:
xvals = np.array([-np.inf, low, high, np.inf])
xmid = (high+low)/2.0
cdfs = stats.truncnorm.cdf(xvals, low, high)
sfs = stats.truncnorm.sf(xvals, low, high)
pdfs = stats.truncnorm.pdf(xvals, low, high)
expected_cdfs = np.array([0, 0, 1, 1])
expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])
expected_pdfs = np.array([0, 3.3619772, 0.1015229, 0])
if low < 0:
expected_pdfs = np.array([0, 0.1015229, 3.3619772, 0])
assert_almost_equal(cdfs, expected_cdfs)
assert_almost_equal(sfs, expected_sfs)
assert_almost_equal(pdfs, expected_pdfs)
assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]), low+0.5)
pvals = np.array([0, 0.5, 1.0])
ppfs = stats.truncnorm.ppf(pvals, low, high)
expected_ppfs = np.array([low, np.sign(low)*3.1984741, high])
assert_almost_equal(ppfs, expected_ppfs)
if low < 0:
assert_almost_equal(stats.truncnorm.sf(xmid, low, high), 0.8475544278436675)
assert_almost_equal(stats.truncnorm.cdf(xmid, low, high), 0.1524455721563326)
else:
assert_almost_equal(stats.truncnorm.cdf(xmid, low, high), 0.8475544278436675)
assert_almost_equal(stats.truncnorm.sf(xmid, low, high), 0.1524455721563326)
pdf = stats.truncnorm.pdf(xmid, low, high)
assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)
def test_gh_9403_medium_tail_values(self):
for low, high in [[39, 40], [-40, -39]]:
xvals = np.array([-np.inf, low, high, np.inf])
xmid = (high+low)/2.0
cdfs = stats.truncnorm.cdf(xvals, low, high)
sfs = stats.truncnorm.sf(xvals, low, high)
pdfs = stats.truncnorm.pdf(xvals, low, high)
expected_cdfs = np.array([0, 0, 1, 1])
expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])
expected_pdfs = np.array([0, 3.90256074e+01, 2.73349092e-16, 0])
if low < 0:
expected_pdfs = np.array([0, 2.73349092e-16, 3.90256074e+01, 0])
assert_almost_equal(cdfs, expected_cdfs)
assert_almost_equal(sfs, expected_sfs)
assert_almost_equal(pdfs, expected_pdfs)
assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]), low+0.5)
pvals = np.array([0, 0.5, 1.0])
ppfs = stats.truncnorm.ppf(pvals, low, high)
expected_ppfs = np.array([low, np.sign(low)*39.01775731, high])
assert_almost_equal(ppfs, expected_ppfs)
cdfs = stats.truncnorm.cdf(ppfs, low, high)
assert_almost_equal(cdfs, pvals)
if low < 0:
assert_almost_equal(stats.truncnorm.sf(xmid, low, high), 0.9999999970389126)
assert_almost_equal(stats.truncnorm.cdf(xmid, low, high), 2.961048103554866e-09)
else:
assert_almost_equal(stats.truncnorm.cdf(xmid, low, high), 0.9999999970389126)
assert_almost_equal(stats.truncnorm.sf(xmid, low, high), 2.961048103554866e-09)
pdf = stats.truncnorm.pdf(xmid, low, high)
assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)
xvals = np.linspace(low, high, 11)
xvals2 = -xvals[::-1]
assert_almost_equal(stats.truncnorm.cdf(xvals, low, high), stats.truncnorm.sf(xvals2, -high, -low)[::-1])
assert_almost_equal(stats.truncnorm.sf(xvals, low, high), stats.truncnorm.cdf(xvals2, -high, -low)[::-1])
assert_almost_equal(stats.truncnorm.pdf(xvals, low, high), stats.truncnorm.pdf(xvals2, -high, -low)[::-1])
def _test_moments_one_range(self, a, b, expected):
m0, v0, s0, k0 = expected[:4]
m, v, s, k = stats.truncnorm.stats(a, b, moments='mvsk')
assert_almost_equal(m, m0)
assert_almost_equal(v, v0)
assert_almost_equal(s, s0)
assert_almost_equal(k, k0)
@pytest.mark.xfail_on_32bit("reduced accuracy with 32bit platforms.")
def test_moments(self):
# Values validated by changing TRUNCNORM_TAIL_X so as to evaluate
# using both the _norm_XXX() and _norm_logXXX() functions, and by
# removing the _stats and _munp methods in truncnorm to force
# numerical quadrature.
self._test_moments_one_range(-30, 30, [0, 1, 0.0, 0.0])
self._test_moments_one_range(-10, 10, [0, 1, 0.0, 0.0])
self._test_moments_one_range(-3, 3, [0, 0.97333692, 0.0, -0.17111444])
self._test_moments_one_range(-2, 2, [0, 0.7737413, 0.0, -0.63446328])
self._test_moments_one_range(0, np.inf, [0.79788456, 0.36338023, 0.99527175, 0.8691773])
self._test_moments_one_range(-1, 3, [0.2827861, 0.61614174, 0.53930185, -0.20582065])
self._test_moments_one_range(-3, 1, [-0.2827861, 0.61614174, -0.53930185, -0.20582065])
self._test_moments_one_range(-10, -9, [-9.10845629, 0.01144881, -1.89856073, 5.07334611])
self._test_moments_one_range(-20, -19, [-19.05234395, 0.00272507, -1.9838686, 5.87208674])
self._test_moments_one_range(-30, -29, [-29.03440124, 0.00118066, -1.99297727, 5.9303358])
self._test_moments_one_range(-40, -39, [-39.02560741993262, 0.0006548, -1.99631464, 5.61677584])
self._test_moments_one_range(39, 40, [39.02560741993262, 0.0006548, 1.99631464, 5.61677584])
def test_9902_moments(self):
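        # gh-9902: for a=0, b=inf the truncated normal is the half-normal,
        # whose mean is sqrt(2/pi) ~ 0.79788456 and variance 1 - 2/pi ~ 0.36338023.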
m, v = stats.truncnorm.stats(0, np.inf, moments='mv')
assert_almost_equal(m, 0.79788456)
assert_almost_equal(v, 0.36338023)
def test_gh_1489_trac_962_rvs(self):
# Check the original example.
low, high = 10, 15
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
class TestHypergeom(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
assert_(numpy.all(vals >= 0) &
numpy.all(vals <= 3))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.hypergeom.rvs(20, 3, 10)
assert_(isinstance(val, int))
val = stats.hypergeom(20, 3, 10).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_precision(self):
# comparison number from mpmath
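        # stats.hypergeom.pmf(k, M, n, N) is the probability of drawing k
        # successes in N draws, without replacement, from a population of
        # size M containing n successes.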
M = 2500
n = 50
N = 500
tot = M
good = n
hgpmf = stats.hypergeom.pmf(2, tot, good, N)
assert_almost_equal(hgpmf, 0.0010114963068932233, 11)
def test_args(self):
# test correct output for corner cases of arguments
# see gh-2325
assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
def test_cdf_above_one(self):
# for some values of parameters, hypergeom cdf was >1, see gh-2238
assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)
def test_precision2(self):
# Test hypergeom precision for large numbers. See #1218.
# Results compared with those from R.
oranges = 9.9e4
pears = 1.1e5
fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
quantile = 2e4
res = [stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten)
for eaten in fruits_eaten]
expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
8.265601e-11, 0.1237904, 1])
assert_allclose(res, expected, atol=0, rtol=5e-7)
# Test with array_like first argument
quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
assert_allclose(res2, expected2, atol=0, rtol=5e-7)
def test_entropy(self):
# Simple tests of entropy.
hg = stats.hypergeom(4, 1, 1)
h = hg.entropy()
expected_p = np.array([0.75, 0.25])
expected_h = -np.sum(xlogy(expected_p, expected_p))
assert_allclose(h, expected_h)
hg = stats.hypergeom(1, 1, 1)
h = hg.entropy()
assert_equal(h, 0.0)
def test_logsf(self):
# Test logsf for very large numbers. See issue #4982
# Results compare with those from R (v3.2.0):
# phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)
# -2239.771
k = 1e4
M = 1e7
n = 1e6
N = 5e4
result = stats.hypergeom.logsf(k, M, n, N)
expected = -2239.771 # From R
assert_almost_equal(result, expected, decimal=3)
k = 1
M = 1600
n = 600
N = 300
result = stats.hypergeom.logsf(k, M, n, N)
expected = -2.566567e-68 # From R
assert_almost_equal(result, expected, decimal=15)
def test_logcdf(self):
# Test logcdf for very large numbers. See issue #8692
# Results compare with those from R (v3.3.2):
# phyper(k, n, M-n, N, lower.tail=TRUE, log.p=TRUE)
# -5273.335
k = 1
M = 1e7
n = 1e6
N = 5e4
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -5273.335 # From R
assert_almost_equal(result, expected, decimal=3)
# Same example as in issue #8692
k = 40
M = 1600
n = 50
N = 300
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -7.565148879229e-23 # From R
assert_almost_equal(result, expected, decimal=15)
k = 125
M = 1600
n = 250
N = 500
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -4.242688e-12 # From R
assert_almost_equal(result, expected, decimal=15)
# test broadcasting robustness based on reviewer
# concerns in PR 9603; using an array version of
# the example from issue #8692
k = np.array([40, 40, 40])
M = 1600
n = 50
N = 300
result = stats.hypergeom.logcdf(k, M, n, N)
expected = np.full(3, -7.565148879229e-23) # filled from R result
assert_almost_equal(result, expected, decimal=15)
class TestLoggamma(object):
def test_stats(self):
# The following precomputed values are from the table in section 2.2
# of "A Statistical Study of Log-Gamma Distribution", by Ping Shing
# Chan (thesis, McMaster University, 1993).
table = np.array([
# c, mean, var, skew, exc. kurt.
0.5, -1.9635, 4.9348, -1.5351, 4.0000,
1.0, -0.5772, 1.6449, -1.1395, 2.4000,
12.0, 2.4427, 0.0869, -0.2946, 0.1735,
]).reshape(-1, 5)
for c, mean, var, skew, kurt in table:
            computed = stats.loggamma.stats(c, moments='mvsk')
assert_array_almost_equal(computed, [mean, var, skew, kurt],
decimal=4)
class TestLogistic(object):
# gh-6226
def test_cdf_ppf(self):
x = np.linspace(-20, 20)
y = stats.logistic.cdf(x)
xx = stats.logistic.ppf(y)
assert_allclose(x, xx)
def test_sf_isf(self):
x = np.linspace(-20, 20)
y = stats.logistic.sf(x)
xx = stats.logistic.isf(y)
assert_allclose(x, xx)
def test_extreme_values(self):
# p is chosen so that 1 - (1 - p) == p in double precision
p = 9.992007221626409e-16
desired = 34.53957599234088
assert_allclose(stats.logistic.ppf(1 - p), desired)
assert_allclose(stats.logistic.isf(p), desired)
class TestLogser(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.logser.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.logser.rvs(0.75)
assert_(isinstance(val, int))
val = stats.logser(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf_small_p(self):
m = stats.logser.pmf(4, 1e-20)
# The expected value was computed using mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 64
# >>> k = 4
# >>> p = mpmath.mpf('1e-20')
# >>> float(-(p**k)/k/mpmath.log(1-p))
# 2.5e-61
# It is also clear from noticing that for very small p,
# log(1-p) is approximately -p, and the formula becomes
# p**(k-1) / k
assert_allclose(m, 2.5e-61)
def test_mean_small_p(self):
m = stats.logser.mean(1e-8)
# The expected mean was computed using mpmath:
# >>> import mpmath
# >>> mpmath.dps = 60
# >>> p = mpmath.mpf('1e-8')
# >>> float(-p / ((1 - p)*mpmath.log(1 - p)))
# 1.000000005
assert_allclose(m, 1.000000005)
class TestPareto(object):
def test_stats(self):
# Check the stats() method with some simple values. Also check
# that the calculations do not trigger RuntimeWarnings.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')
assert_equal(m, np.inf)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')
assert_equal(m, np.inf)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')
assert_equal(m, 3.0)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')
assert_equal(m, 2.0)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')
assert_allclose(m, 2.5 / 1.5)
assert_allclose(v, 2.5 / (1.5*1.5*0.5))
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')
assert_allclose(m, 1.5)
assert_allclose(v, 0.75)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')
assert_allclose(m, 3.5 / 2.5)
assert_allclose(v, 3.5 / (2.5*2.5*1.5))
assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
assert_allclose(m, 4.0 / 3.0)
assert_allclose(v, 4.0 / 18.0)
assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
assert_allclose(m, 4.5 / 3.5)
assert_allclose(v, 4.5 / (3.5*3.5*2.5))
assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))
assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))
def test_sf(self):
x = 1e9
b = 2
scale = 1.5
p = stats.pareto.sf(x, b, loc=0, scale=scale)
expected = (scale/x)**b # 2.25e-18
assert_allclose(p, expected)
class TestGenpareto(object):
def test_ab(self):
# c >= 0: a, b = [0, inf]
for c in [1., 0.]:
c = np.asarray(c)
a, b = stats.genpareto._get_support(c)
assert_equal(a, 0.)
assert_(np.isposinf(b))
# c < 0: a=0, b=1/|c|
c = np.asarray(-2.)
a, b = stats.genpareto._get_support(c)
assert_allclose([a, b], [0., 0.5])
def test_c0(self):
# with c=0, genpareto reduces to the exponential distribution
rv = stats.genpareto(c=0.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.expon.pdf(x))
assert_allclose(rv.cdf(x), stats.expon.cdf(x))
assert_allclose(rv.sf(x), stats.expon.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.expon.ppf(q))
def test_cm1(self):
# with c=-1, genpareto reduces to the uniform distr on [0, 1]
rv = stats.genpareto(c=-1.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.uniform.pdf(x))
assert_allclose(rv.cdf(x), stats.uniform.cdf(x))
assert_allclose(rv.sf(x), stats.uniform.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.uniform.ppf(q))
# logpdf(1., c=-1) should be zero
assert_allclose(rv.logpdf(1), 0)
def test_x_inf(self):
# make sure x=inf is handled gracefully
rv = stats.genpareto(c=0.1)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=0.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=-1.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
def test_c_continuity(self):
# pdf is continuous at c=0, -1
x = np.linspace(0, 10, 30)
for c in [0, -1]:
pdf0 = stats.genpareto.pdf(x, c)
for dc in [1e-14, -1e-14]:
pdfc = stats.genpareto.pdf(x, c + dc)
assert_allclose(pdf0, pdfc, atol=1e-12)
cdf0 = stats.genpareto.cdf(x, c)
            for dc in [1e-14, -1e-14]:
cdfc = stats.genpareto.cdf(x, c + dc)
assert_allclose(cdf0, cdfc, atol=1e-12)
def test_c_continuity_ppf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
ppf0 = stats.genpareto.ppf(q, c)
for dc in [1e-14, -1e-14]:
ppfc = stats.genpareto.ppf(q, c + dc)
assert_allclose(ppf0, ppfc, atol=1e-12)
def test_c_continuity_isf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
isf0 = stats.genpareto.isf(q, c)
for dc in [1e-14, -1e-14]:
isfc = stats.genpareto.isf(q, c + dc)
assert_allclose(isf0, isfc, atol=1e-12)
def test_cdf_ppf_roundtrip(self):
# this should pass with machine precision. hat tip @pbrod
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [1e-8, -1e-18, 1e-15, -1e-15]:
assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
q, atol=1e-15)
def test_logsf(self):
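        # For c != 0, genpareto has sf(x, c) = (1 + c*x)**(-1/c), so
        # logsf(1e10, 0.01) = -log1p(0.01 * 1e10) / 0.01 ~ -1842.068.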
logp = stats.genpareto.logsf(1e10, .01, 0, 1)
assert_allclose(logp, -1842.0680753952365)
# Values in 'expected_stats' are
# [mean, variance, skewness, excess kurtosis].
@pytest.mark.parametrize(
'c, expected_stats',
[(0, [1, 1, 2, 6]),
(1/4, [4/3, 32/9, 10/np.sqrt(2), np.nan]),
(1/9, [9/8, (81/64)*(9/7), (10/9)*np.sqrt(7), 754/45]),
(-1, [1/2, 1/12, 0, -6/5])])
def test_stats(self, c, expected_stats):
result = stats.genpareto.stats(c, moments='mvsk')
assert_allclose(result, expected_stats, rtol=1e-13, atol=1e-15)
def test_var(self):
# Regression test for gh-11168.
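        # For c < 1/2 the variance is 1/((1 - c)**2 * (1 - 2*c)) ~ 1 + 4*c
        # for small c, so c=1e-8 gives a value just above 1.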
v = stats.genpareto.var(1e-8)
assert_allclose(v, 1.000000040000001, rtol=1e-13)
class TestPearson3(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.pearson3.rvs(0.1, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllFloat'])
val = stats.pearson3.rvs(0.5)
assert_(isinstance(val, float))
val = stats.pearson3(0.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllFloat'])
assert_(len(val) == 3)
def test_pdf(self):
vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
atol=1e-6)
vals = stats.pearson3.pdf(-3, 0.1)
assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1)
assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
0.39885918, 0.23413173]), atol=1e-6)
def test_cdf(self):
vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
atol=1e-6)
vals = stats.pearson3.cdf(-3, 0.1)
assert_allclose(vals, [0.00082256], atol=1e-6)
vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1)
assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
5.06649130e-01, 8.41442111e-01], atol=1e-6)
class TestKappa4(object):
def test_cdf_genpareto(self):
# h = 1 and k != 0 is generalized Pareto
x = [0.0, 0.1, 0.2, 0.5]
h = 1.0
for k in [-1.9, -1.0, -0.5, -0.2, -0.1, 0.1, 0.2, 0.5, 1.0,
1.9]:
vals = stats.kappa4.cdf(x, h, k)
            # kappa4's shape parameter k has the opposite sign of genpareto's c
vals_comp = stats.genpareto.cdf(x, -k)
assert_allclose(vals, vals_comp)
def test_cdf_genextreme(self):
# h = 0 and k != 0 is generalized extreme value
x = np.linspace(-5, 5, 10)
h = 0.0
k = np.linspace(-3, 3, 10)
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.genextreme.cdf(x, k)
assert_allclose(vals, vals_comp)
def test_cdf_expon(self):
# h = 1 and k = 0 is exponential
x = np.linspace(0, 10, 10)
h = 1.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.expon.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_gumbel_r(self):
# h = 0 and k = 0 is gumbel_r
x = np.linspace(-5, 5, 10)
h = 0.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.gumbel_r.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_logistic(self):
# h = -1 and k = 0 is logistic
x = np.linspace(-5, 5, 10)
h = -1.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.logistic.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_uniform(self):
# h = 1 and k = 1 is uniform
x = np.linspace(-5, 5, 10)
h = 1.0
k = 1.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.uniform.cdf(x)
assert_allclose(vals, vals_comp)
def test_integers_ctor(self):
# regression test for gh-7416: _argcheck fails for integer h and k
# in numpy 1.12
stats.kappa4(1, 2)
class TestPoisson(object):
def setup_method(self):
np.random.seed(1234)
def test_pmf_basic(self):
# Basic case
ln2 = np.log(2)
vals = stats.poisson.pmf([0, 1, 2], ln2)
expected = [0.5, ln2/2, ln2**2/4]
assert_allclose(vals, expected)
def test_mu0(self):
# Edge case: mu=0
vals = stats.poisson.pmf([0, 1, 2], 0)
expected = [1, 0, 0]
assert_array_equal(vals, expected)
interval = stats.poisson.interval(0.95, 0)
assert_equal(interval, (0, 0))
def test_rvs(self):
vals = stats.poisson.rvs(0.5, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.poisson.rvs(0.5)
assert_(isinstance(val, int))
val = stats.poisson(0.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_stats(self):
mu = 16.0
result = stats.poisson.stats(mu, moments='mvsk')
assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu])
mu = np.array([0.0, 1.0, 2.0])
result = stats.poisson.stats(mu, moments='mvsk')
expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5])
assert_allclose(result, expected)
class TestKSTwo(object):
def setup_method(self):
np.random.seed(1234)
def test_cdf(self):
for n in [1, 2, 3, 10, 100, 1000]:
# Test x-values:
# 0, 1/2n, where the cdf should be 0
# 1/n, where the cdf should be n!/n^n
# 0.5, where the cdf should match ksone.cdf
# 1-1/n, where cdf = 1-2/n^n
# 1, where cdf == 1
# (E.g. Exact values given by Eqn 1 in Simard / L'Ecuyer)
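            # For example, with n=3: cdf(1/3) = 3!/3**3 = 2/9 and
            # cdf(1 - 1/3) = 1 - 2/3**3 = 25/27.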
x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])
v1 = (1.0/n)**n
lg = scipy.special.gammaln(n+1)
elg = (np.exp(lg) if v1 != 0 else 0)
expected = np.array([0, 0, v1 * elg,
1 - 2*stats.ksone.sf(0.5, n),
max(1 - 2*v1, 0.0),
1.0])
vals_cdf = stats.kstwo.cdf(x, n)
assert_allclose(vals_cdf, expected)
def test_sf(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
# Same x values as in test_cdf, and use sf = 1 - cdf
x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])
v1 = (1.0/n)**n
lg = scipy.special.gammaln(n+1)
elg = (np.exp(lg) if v1 != 0 else 0)
expected = np.array([1.0, 1.0,
1 - v1 * elg,
2*stats.ksone.sf(0.5, n),
min(2*v1, 1.0), 0])
vals_sf = stats.kstwo.sf(x, n)
assert_allclose(vals_sf, expected)
def test_cdf_sqrtn(self):
# For fixed a, cdf(a/sqrt(n), n) -> kstwobign(a) as n->infinity
        # cdf(a/sqrt(n), n) is a non-increasing function of n (and increasing
        # in a).  Check that it is indeed non-increasing, allowing for small
        # floating point and algorithm differences.
x = np.linspace(0, 2, 11)[1:]
ns = [50, 100, 200, 400, 1000, 2000]
for _x in x:
xn = _x / np.sqrt(ns)
probs = stats.kstwo.cdf(xn, ns)
diffs = np.diff(probs)
assert_array_less(diffs, 1e-8)
def test_cdf_sf(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
vals_cdf = stats.kstwo.cdf(x, n)
vals_sf = stats.kstwo.sf(x, n)
assert_array_almost_equal(vals_cdf, 1 - vals_sf)
def test_cdf_sf_sqrtn(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = x / np.sqrt(n)
vals_cdf = stats.kstwo.cdf(xn, n)
vals_sf = stats.kstwo.sf(xn, n)
assert_array_almost_equal(vals_cdf, 1 - vals_sf)
def test_ppf_of_cdf(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = x[x > 0.5/n]
vals_cdf = stats.kstwo.cdf(xn, n)
# CDFs close to 1 are better dealt with using the SF
cond = (0 < vals_cdf) & (vals_cdf < 0.99)
vals = stats.kstwo.ppf(vals_cdf, n)
assert_allclose(vals[cond], xn[cond], rtol=1e-4)
def test_isf_of_sf(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = x[x > 0.5/n]
vals_isf = stats.kstwo.isf(xn, n)
cond = (0 < vals_isf) & (vals_isf < 1.0)
vals = stats.kstwo.sf(vals_isf, n)
assert_allclose(vals[cond], xn[cond], rtol=1e-4)
def test_ppf_of_cdf_sqrtn(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = (x / np.sqrt(n))[x > 0.5/n]
vals_cdf = stats.kstwo.cdf(xn, n)
cond = (0 < vals_cdf) & (vals_cdf < 1.0)
vals = stats.kstwo.ppf(vals_cdf, n)
assert_allclose(vals[cond], xn[cond])
def test_isf_of_sf_sqrtn(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = (x / np.sqrt(n))[x > 0.5/n]
vals_sf = stats.kstwo.sf(xn, n)
# SFs close to 1 are better dealt with using the CDF
cond = (0 < vals_sf) & (vals_sf < 0.95)
vals = stats.kstwo.isf(vals_sf, n)
assert_allclose(vals[cond], xn[cond])
def test_ppf(self):
probs = np.linspace(0, 1, 11)[1:]
for n in [1, 2, 3, 10, 100, 1000]:
xn = stats.kstwo.ppf(probs, n)
vals_cdf = stats.kstwo.cdf(xn, n)
assert_allclose(vals_cdf, probs)
def test_simard_lecuyer_table1(self):
# Compute the cdf for values near the mean of the distribution.
# The mean u ~ log(2)*sqrt(pi/(2n))
# Compute for x in [u/4, u/3, u/2, u, 2u, 3u]
# This is the computation of Table 1 of Simard, R., L'Ecuyer, P. (2011)
# "Computing the Two-Sided Kolmogorov-Smirnov Distribution".
# Except that the values below are not from the published table, but
# were generated using an independent SageMath implementation of
# Durbin's algorithm (with the exponentiation and scaling of
# Marsaglia/Tsang/Wang's version) using 500 bit arithmetic.
# Some of the values in the published table have relative
# errors greater than 1e-4.
ns = [10, 50, 100, 200, 500, 1000]
ratios = np.array([1.0/4, 1.0/3, 1.0/2, 1, 2, 3])
expected = np.array([
[1.92155292e-08, 5.72933228e-05, 2.15233226e-02, 6.31566589e-01, 9.97685592e-01, 9.99999942e-01],
[2.28096224e-09, 1.99142563e-05, 1.42617934e-02, 5.95345542e-01, 9.96177701e-01, 9.99998662e-01],
[1.00201886e-09, 1.32673079e-05, 1.24608594e-02, 5.86163220e-01, 9.95866877e-01, 9.99998240e-01],
[4.93313022e-10, 9.52658029e-06, 1.12123138e-02, 5.79486872e-01, 9.95661824e-01, 9.99997964e-01],
[2.37049293e-10, 6.85002458e-06, 1.01309221e-02, 5.73427224e-01, 9.95491207e-01, 9.99997750e-01],
[1.56990874e-10, 5.71738276e-06, 9.59725430e-03, 5.70322692e-01, 9.95409545e-01, 9.99997657e-01]
])
for idx, n in enumerate(ns):
x = ratios * np.log(2) * np.sqrt(np.pi/2/n)
vals_cdf = stats.kstwo.cdf(x, n)
assert_allclose(vals_cdf, expected[idx], rtol=1e-5)
class TestZipf(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.zipf.rvs(1.5, size=(2, 50))
assert_(numpy.all(vals >= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.zipf.rvs(1.5)
assert_(isinstance(val, int))
val = stats.zipf(1.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_moments(self):
# n-th moment is finite iff a > n + 1
m, v = stats.zipf.stats(a=2.8)
assert_(np.isfinite(m))
assert_equal(v, np.inf)
s, k = stats.zipf.stats(a=4.8, moments='sk')
assert_(not np.isfinite([s, k]).all())
class TestDLaplace(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.dlaplace.rvs(1.5, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.dlaplace.rvs(1.5)
assert_(isinstance(val, int))
val = stats.dlaplace(1.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
assert_(stats.dlaplace.rvs(0.8) is not None)
def test_stats(self):
# compare the explicit formulas w/ direct summation using pmf
a = 1.
dl = stats.dlaplace(a)
m, v, s, k = dl.stats('mvsk')
N = 37
xx = np.arange(-N, N+1)
pp = dl.pmf(xx)
m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4)
assert_equal((m, s), (0, 0))
assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8)
def test_stats2(self):
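        # With a = log(2), exp(-a) = 1/2 and the closed forms give
        # variance 2*exp(-a)/(1 - exp(-a))**2 = 4 and excess kurtosis 3.25.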
a = np.log(2.)
dl = stats.dlaplace(a)
m, v, s, k = dl.stats('mvsk')
assert_equal((m, s), (0., 0.))
assert_allclose((v, k), (4., 3.25))
class TestInvGamma(object):
def test_invgamma_inf_gh_1866(self):
# invgamma's moments are only finite for a>n
# specific numbers checked w/ boost 1.54
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
expected = [0.05461496450, 0.0001723162534, 1.020362676,
2.055616582]
assert_allclose(mvsk, expected)
a = [1.1, 3.1, 5.6]
mvsk = stats.invgamma.stats(a=a, moments='mvsk')
expected = ([10., 0.476190476, 0.2173913043], # mmm
[np.inf, 0.2061430632, 0.01312749422], # vvv
[np.nan, 41.95235392, 2.919025532], # sss
[np.nan, np.nan, 24.51923076]) # kkk
for x, y in zip(mvsk, expected):
assert_almost_equal(x, y)
def test_cdf_ppf(self):
# gh-6245
x = np.logspace(-2.6, 0)
y = stats.invgamma.cdf(x, 1)
xx = stats.invgamma.ppf(y, 1)
assert_allclose(x, xx)
def test_sf_isf(self):
# gh-6245
if sys.maxsize > 2**32:
x = np.logspace(2, 100)
else:
            # invgamma roundtrip on 32-bit systems has relative accuracy
# ~1e-15 until x=1e+15, and becomes inf above x=1e+18
x = np.logspace(2, 18)
y = stats.invgamma.sf(x, 1)
xx = stats.invgamma.isf(y, 1)
assert_allclose(x, xx, rtol=1.0)
class TestF(object):
def test_endpoints(self):
        # Compute the pdf at the left endpoint dist.a.
data = [[stats.f, (2, 1), 1.0]]
for _f, _args, _correct in data:
ans = _f.pdf(_f.a, *_args)
print(_f, (_args), ans, _correct, ans == _correct)
ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
correct = [_correct_ for _f, _args, _correct_ in data]
assert_array_almost_equal(ans, correct)
def test_f_moments(self):
# n-th moment of F distributions is only finite for n < dfd / 2
m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
assert_(np.isfinite(m))
assert_(np.isfinite(v))
assert_(np.isfinite(s))
assert_(not np.isfinite(k))
def test_moments_warnings(self):
# no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')
def test_stats_broadcast(self):
dfn = np.array([[3], [11]])
dfd = np.array([11, 12])
m, v, s, k = stats.f.stats(dfn=dfn, dfd=dfd, moments='mvsk')
m2 = [dfd / (dfd - 2)]*2
assert_allclose(m, m2)
v2 = 2 * dfd**2 * (dfn + dfd - 2) / dfn / (dfd - 2)**2 / (dfd - 4)
assert_allclose(v, v2)
s2 = ((2*dfn + dfd - 2) * np.sqrt(8*(dfd - 4)) /
((dfd - 6) * np.sqrt(dfn*(dfn + dfd - 2))))
assert_allclose(s, s2)
k2num = 12 * (dfn * (5*dfd - 22) * (dfn + dfd - 2) +
(dfd - 4) * (dfd - 2)**2)
k2den = dfn * (dfd - 6) * (dfd - 8) * (dfn + dfd - 2)
k2 = k2num / k2den
assert_allclose(k, k2)
def test_rvgeneric_std():
# Regression test for #1191
assert_array_almost_equal(stats.t.std([5, 6]), [1.29099445, 1.22474487])
def test_moments_t():
# regression test for #8786
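    # The n-th moment of the t distribution is finite only for df > n, which
    # is why the entries below change from inf/nan to finite values as df grows.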
assert_equal(stats.t.stats(df=1, moments='mvsk'),
(np.inf, np.nan, np.nan, np.nan))
assert_equal(stats.t.stats(df=1.01, moments='mvsk'),
(0.0, np.inf, np.nan, np.nan))
assert_equal(stats.t.stats(df=2, moments='mvsk'),
(0.0, np.inf, np.nan, np.nan))
assert_equal(stats.t.stats(df=2.01, moments='mvsk'),
(0.0, 2.01/(2.01-2.0), np.nan, np.inf))
assert_equal(stats.t.stats(df=3, moments='sk'), (np.nan, np.inf))
assert_equal(stats.t.stats(df=3.01, moments='sk'), (0.0, np.inf))
assert_equal(stats.t.stats(df=4, moments='sk'), (0.0, np.inf))
assert_equal(stats.t.stats(df=4.01, moments='sk'), (0.0, 6.0/(4.01 - 4.0)))
class TestRvDiscrete(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
states = [-1, 0, 1, 2, 3, 4]
probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
samples = 1000
r = stats.rv_discrete(name='sample', values=(states, probability))
x = r.rvs(size=samples)
assert_(isinstance(x, numpy.ndarray))
for s, p in zip(states, probability):
assert_(abs(sum(x == s)/float(samples) - p) < 0.05)
x = r.rvs()
assert_(isinstance(x, int))
def test_entropy(self):
# Basic tests of entropy.
pvals = np.array([0.25, 0.45, 0.3])
p = stats.rv_discrete(values=([0, 1, 2], pvals))
expected_h = -sum(xlogy(pvals, pvals))
h = p.entropy()
assert_allclose(h, expected_h)
p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
h = p.entropy()
assert_equal(h, 0.0)
def test_pmf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
x = [[1., 4.],
[3., 2]]
assert_allclose(rv.pmf(x),
[[0.5, 0.2],
[0., 0.3]], atol=1e-14)
def test_cdf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5]
expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1]
assert_allclose(rv.cdf(x_values), expected, atol=1e-14)
# also check scalar arguments
assert_allclose([rv.cdf(xx) for xx in x_values],
expected, atol=1e-14)
def test_ppf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
q_values = [0.1, 0.5, 0.6, 0.8, 0.9, 1.]
expected = [1, 1, 2, 2, 4, 4]
assert_allclose(rv.ppf(q_values), expected, atol=1e-14)
# also check scalar arguments
assert_allclose([rv.ppf(q) for q in q_values],
expected, atol=1e-14)
def test_cdf_ppf_next(self):
# copied and special cased from test_discrete_basic
vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1])
rv = stats.rv_discrete(values=vals)
assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8),
rv.xk[1:])
def test_expect(self):
xk = [1, 2, 4, 6, 7, 11]
pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1]
rv = stats.rv_discrete(values=(xk, pk))
assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
def test_multidimension(self):
xk = np.arange(12).reshape((3, 4))
pk = np.array([[0.1, 0.1, 0.15, 0.05],
[0.1, 0.1, 0.05, 0.05],
[0.1, 0.1, 0.05, 0.05]])
rv = stats.rv_discrete(values=(xk, pk))
assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
def test_bad_input(self):
xk = [1, 2, 3]
pk = [0.5, 0.5]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
pk = [1, 2, 3]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
xk = [1, 2, 3]
pk = [0.5, 1.2, -0.7]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
xk = [1, 2, 3, 4, 5]
pk = [0.3, 0.3, 0.3, 0.3, -0.2]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
def test_shape_rv_sample(self):
# tests added for gh-9565
# mismatch of 2d inputs
xk, pk = np.arange(4).reshape((2, 2)), np.full((2, 3), 1/6)
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
# same number of elements, but shapes not compatible
xk, pk = np.arange(6).reshape((3, 2)), np.full((2, 3), 1/6)
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
# same shapes => no error
xk, pk = np.arange(6).reshape((3, 2)), np.full((3, 2), 1/6)
assert_equal(stats.rv_discrete(values=(xk, pk)).pmf(0), 1/6)
class TestSkewNorm(object):
def setup_method(self):
self.rng = check_random_state(1234)
def test_normal(self):
# When the skewness is 0 the distribution is normal
x = np.linspace(-5, 5, 100)
assert_array_almost_equal(stats.skewnorm.pdf(x, a=0),
stats.norm.pdf(x))
def test_rvs(self):
shape = (3, 4, 5)
x = stats.skewnorm.rvs(a=0.75, size=shape, random_state=self.rng)
assert_equal(shape, x.shape)
x = stats.skewnorm.rvs(a=-3, size=shape, random_state=self.rng)
assert_equal(shape, x.shape)
def test_moments(self):
X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2,
random_state=self.rng)
expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
computed = stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk')
assert_array_almost_equal(computed, expected, decimal=2)
X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2,
random_state=self.rng)
expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
computed = stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk')
assert_array_almost_equal(computed, expected, decimal=2)
def test_cdf_large_x(self):
# Regression test for gh-7746.
# The x values are large enough that the closest 64 bit floating
# point representation of the exact CDF is 1.0.
p = stats.skewnorm.cdf([10, 20, 30], -1)
assert_allclose(p, np.ones(3), rtol=1e-14)
p = stats.skewnorm.cdf(25, 2.5)
assert_allclose(p, 1.0, rtol=1e-14)
def test_cdf_sf_small_values(self):
# Triples are [x, a, cdf(x, a)]. These values were computed
# using CDF[SkewNormDistribution[0, 1, a], x] in Wolfram Alpha.
cdfvals = [
[-8, 1, 3.870035046664392611e-31],
[-4, 2, 8.1298399188811398e-21],
[-2, 5, 1.55326826787106273e-26],
[-9, -1, 2.257176811907681295e-19],
[-10, -4, 1.523970604832105213e-23],
]
for x, a, cdfval in cdfvals:
p = stats.skewnorm.cdf(x, a)
assert_allclose(p, cdfval, rtol=1e-8)
# For the skew normal distribution, sf(-x, -a) = cdf(x, a).
p = stats.skewnorm.sf(-x, -a)
assert_allclose(p, cdfval, rtol=1e-8)
class TestExpon(object):
def test_zero(self):
assert_equal(stats.expon.pdf(0), 1)
def test_tail(self): # Regression test for ticket 807
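        # expon.cdf(x) = 1 - exp(-x) ~ x for tiny x, so no precision is lost
        # near zero.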
assert_equal(stats.expon.cdf(1e-18), 1e-18)
assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.expon.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.expon.fit, x)
class TestNorm(object):
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.norm.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.norm.fit, x)
def test_bad_keyword_arg(self):
x = [1, 2, 3]
assert_raises(TypeError, stats.norm.fit, x, plate="shrimp")
class TestUniform(object):
"""gh-10300"""
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.uniform.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.uniform.fit, x)
class TestExponNorm(object):
def test_moments(self):
# Some moment test cases based on non-loc/scaled formula
def get_moms(lam, sig, mu):
            # See the Wikipedia article on the exponentially modified
            # Gaussian distribution for these formulae.
opK2 = 1.0 + 1 / (lam*sig)**2
exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)
exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)
return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]
mu, sig, lam = 0, 1, 1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = -3, 2, 0.1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = 0, 3, 1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = -5, 11, 3.5
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.exponnorm.fit, x, floc=0, fscale=1)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.exponnorm.fit, x, floc=0, fscale=1)
def test_extremes_x(self):
# Test for extreme values against overflows
assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)
assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)
assert_almost_equal(stats.exponnorm.pdf(1, 0.01), 0.0)
assert_almost_equal(stats.exponnorm.pdf(-900, 0.01), 0.0)
assert_almost_equal(stats.exponnorm.pdf(+900, 0.01), 0.0)
class TestGenExpon(object):
def test_pdf_unity_area(self):
from scipy.integrate import simps
# PDF should integrate to one
p = stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
assert_almost_equal(simps(p, dx=0.01), 1, 1)
def test_cdf_bounds(self):
# CDF should always be positive
cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
assert_(numpy.all((0 <= cdf) & (cdf <= 1)))
class TestExponpow(object):
def test_tail(self):
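        # exponpow has cdf(x, b) = 1 - exp(1 - exp(x**b)), which behaves like
        # x**b for small x; hence cdf(1e-10, 2) ~ 1e-20.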
assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8),
5)
class TestSkellam(object):
def test_pmf(self):
# comparison to R
k = numpy.arange(-10, 15)
mu1, mu2 = 10, 5
skpmfR = numpy.array(
[4.2254582961926893e-005, 1.1404838449648488e-004,
2.8979625801752660e-004, 6.9177078182101231e-004,
1.5480716105844708e-003, 3.2412274963433889e-003,
6.3373707175123292e-003, 1.1552351566696643e-002,
1.9606152375042644e-002, 3.0947164083410337e-002,
4.5401737566767360e-002, 6.1894328166820688e-002,
7.8424609500170578e-002, 9.2418812533573133e-002,
1.0139793148019728e-001, 1.0371927988298846e-001,
9.9076583077406091e-002, 8.8546660073089561e-002,
7.4187842052486810e-002, 5.8392772862200251e-002,
4.3268692953013159e-002, 3.0248159818374226e-002,
1.9991434305603021e-002, 1.2516877303301180e-002,
7.4389876226229707e-003])
assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)
def test_cdf(self):
# comparison to R, only 5 decimals
k = numpy.arange(-10, 15)
mu1, mu2 = 10, 5
skcdfR = numpy.array(
[6.4061475386192104e-005, 1.7810985988267694e-004,
4.6790611790020336e-004, 1.1596768997212152e-003,
2.7077485103056847e-003, 5.9489760066490718e-003,
1.2286346724161398e-002, 2.3838698290858034e-002,
4.3444850665900668e-002, 7.4392014749310995e-002,
1.1979375231607835e-001, 1.8168808048289900e-001,
2.6011268998306952e-001, 3.5253150251664261e-001,
4.5392943399683988e-001, 5.5764871387982828e-001,
6.5672529695723436e-001, 7.4527195703032389e-001,
8.1945979908281064e-001, 8.7785257194501087e-001,
9.2112126489802404e-001, 9.5136942471639818e-001,
9.7136085902200120e-001, 9.8387773632530240e-001,
9.9131672394792536e-001])
assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)
class TestLognorm(object):
def test_pdf(self):
# Regression test for Ticket #1471: avoid nan with 0/0 situation
# Also make sure there are no warnings at x=0, cf gh-5202
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
pdf = stats.lognorm.pdf([0, 0.5, 1], 1)
assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228])
def test_logcdf(self):
# Regression test for gh-5940: sf et al would underflow too early
x2, mu, sigma = 201.68, 195, 0.149
assert_allclose(stats.lognorm.sf(x2-mu, s=sigma),
stats.norm.sf(np.log(x2-mu)/sigma))
assert_allclose(stats.lognorm.logsf(x2-mu, s=sigma),
stats.norm.logsf(np.log(x2-mu)/sigma))
class TestBeta(object):
def test_logpdf(self):
# Regression test for Ticket #1326: avoid nan with 0*log(0) situation
logpdf = stats.beta.logpdf(0, 1, 0.5)
assert_almost_equal(logpdf, -0.69314718056)
logpdf = stats.beta.logpdf(0, 0.5, 1)
assert_almost_equal(logpdf, np.inf)
def test_logpdf_ticket_1866(self):
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
b = stats.beta(alpha, beta)
assert_allclose(b.logpdf(x).sum(), -1201.699061824062)
assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
def test_fit_bad_keyword_args(self):
x = [0.1, 0.5, 0.6]
assert_raises(TypeError, stats.beta.fit, x, floc=0, fscale=1,
plate="shrimp")
def test_fit_duplicated_fixed_parameter(self):
# At most one of 'f0', 'fa' or 'fix_a' can be given to the fit method.
# More than one raises a ValueError.
x = [0.1, 0.5, 0.6]
assert_raises(ValueError, stats.beta.fit, x, fa=0.5, fix_a=0.5)
class TestBetaPrime(object):
def test_logpdf(self):
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
b = stats.betaprime(alpha, beta)
assert_(np.isfinite(b.logpdf(x)).all())
assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
def test_cdf(self):
# regression test for gh-4030: Implementation of
# scipy.stats.betaprime.cdf()
x = stats.betaprime.cdf(0, 0.2, 0.3)
assert_equal(x, 0.0)
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
cdfs = stats.betaprime.cdf(x, alpha, beta)
assert_(np.isfinite(cdfs).all())
# check the new cdf implementation vs generic one:
gen_cdf = stats.rv_continuous._cdf_single
cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]
assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)
class TestGamma(object):
def test_pdf(self):
# a few test cases to compare with R
pdf = stats.gamma.pdf(90, 394, scale=1./5)
assert_almost_equal(pdf, 0.002312341)
pdf = stats.gamma.pdf(3, 10, scale=1./5)
assert_almost_equal(pdf, 0.1620358)
def test_logpdf(self):
# Regression test for Ticket #1326: cornercase avoid nan with 0*log(0)
# situation
logpdf = stats.gamma.logpdf(0, 1)
assert_almost_equal(logpdf, 0)
def test_fit_bad_keyword_args(self):
x = [0.1, 0.5, 0.6]
assert_raises(TypeError, stats.gamma.fit, x, floc=0, plate="shrimp")
class TestChi2(object):
# regression tests after precision improvements, ticket:1041, not verified
def test_precision(self):
assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003,
decimal=14)
assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778,
decimal=14)
def test_ppf(self):
# Expected values computed with mpmath.
df = 4.8
x = stats.chi2.ppf(2e-47, df)
assert_allclose(x, 1.098472479575179840604902808e-19, rtol=1e-10)
x = stats.chi2.ppf(0.5, df)
assert_allclose(x, 4.15231407598589358660093156, rtol=1e-10)
df = 13
x = stats.chi2.ppf(2e-77, df)
assert_allclose(x, 1.0106330688195199050507943e-11, rtol=1e-10)
x = stats.chi2.ppf(0.1, df)
assert_allclose(x, 7.041504580095461859307179763, rtol=1e-10)
class TestGumbelL(object):
# gh-6228
def test_cdf_ppf(self):
x = np.linspace(-100, -4)
y = stats.gumbel_l.cdf(x)
xx = stats.gumbel_l.ppf(y)
assert_allclose(x, xx)
def test_logcdf_logsf(self):
x = np.linspace(-100, -4)
y = stats.gumbel_l.logcdf(x)
z = stats.gumbel_l.logsf(x)
u = np.exp(y)
v = -special.expm1(z)
assert_allclose(u, v)
def test_sf_isf(self):
x = np.linspace(-20, 5)
y = stats.gumbel_l.sf(x)
xx = stats.gumbel_l.isf(y)
assert_allclose(x, xx)
class TestLevyStable(object):
def test_fit(self):
# construct data to have percentiles that match
# example in McCulloch 1986.
x = [-.05413,-.05413,
0.,0.,0.,0.,
.00533,.00533,.00533,.00533,.00533,
.03354,.03354,.03354,.03354,.03354,
.05309,.05309,.05309,.05309,.05309]
alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)
assert_allclose(alpha1, 1.48, rtol=0, atol=0.01)
assert_almost_equal(beta1, -.22, 2)
assert_almost_equal(scale1, 0.01717, 4)
assert_almost_equal(loc1, 0.00233, 2) # to 2 dps due to rounding error in McCulloch86
# cover alpha=2 scenario
x2 = x + [.05309,.05309,.05309,.05309,.05309]
alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2)
assert_equal(alpha2, 2)
assert_equal(beta2, -1)
assert_almost_equal(scale2, .02503, 4)
assert_almost_equal(loc2, .03354, 4)
@pytest.mark.slow
def test_pdf_nolan_samples(self):
""" Test pdf values against Nolan's stablec.exe output
see - http://fs2.american.edu/jpnolan/www/stable/stable.html
There's a known limitation of Nolan's executable for alpha < 0.2.
Repeat following with beta = -1, -.5, 0, .5 and 1
stablec.exe <<
1 # pdf
1 # Nolan S equivalent to S0 in scipy
.25,2,.25 # alpha
-1,-1,0 # beta
-10,10,1 # x
1,0 # gamma, delta
2 # output file
"""
data = np.load(os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/stable-pdf-sample-data.npy')))
data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta')
# support numpy 1.8.2 for travis
npisin = np.isin if hasattr(np, "isin") else np.in1d
tests = [
# best selects
['best', None, 8, None],
# quadrature is accurate for most alpha except 0.25; perhaps limitation of Nolan stablec?
# we reduce size of x to speed up computation as numerical integration slow.
['quadrature', None, 8, lambda r: (r['alpha'] > 0.25) & (npisin(r['x'], [-10,-5,0,5,10]))],
            # zolotarev is accurate except at alpha==1, beta != 0
['zolotarev', None, 8, lambda r: r['alpha'] != 1],
['zolotarev', None, 8, lambda r: (r['alpha'] == 1) & (r['beta'] == 0)],
['zolotarev', None, 1, lambda r: (r['alpha'] == 1) & (r['beta'] != 0)],
# fft accuracy reduces as alpha decreases, fails at low values of alpha and x=0
['fft', 0, 4, lambda r: r['alpha'] > 1],
['fft', 0, 3, lambda r: (r['alpha'] < 1) & (r['alpha'] > 0.25)],
['fft', 0, 1, lambda r: (r['alpha'] == 0.25) & (r['x'] != 0)], # not useful here
]
for ix, (default_method, fft_min_points, decimal_places, filter_func) in enumerate(tests):
stats.levy_stable.pdf_default_method = default_method
stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
subdata = data[filter_func(data)] if filter_func is not None else data
with suppress_warnings() as sup:
sup.record(RuntimeWarning, "Density calculation unstable for alpha=1 and beta!=0.*")
sup.record(RuntimeWarning, "Density calculations experimental for FFT method.*")
p = stats.levy_stable.pdf(subdata['x'], subdata['alpha'], subdata['beta'], scale=1, loc=0)
subdata2 = rec_append_fields(subdata, 'calc', p)
failures = subdata2[(np.abs(p-subdata['p']) >= 1.5*10.**(-decimal_places)) | np.isnan(p)]
assert_almost_equal(p, subdata['p'], decimal_places, "pdf test %s failed with method '%s'\n%s" % (ix, default_method, failures), verbose=False)
@pytest.mark.slow
def test_cdf_nolan_samples(self):
""" Test cdf values against Nolan's stablec.exe output
see - http://fs2.american.edu/jpnolan/www/stable/stable.html
There's a known limitation of Nolan's executable for alpha < 0.2.
Repeat following with beta = -1, -.5, 0, .5 and 1
stablec.exe <<
2 # cdf
1 # Nolan S equivalent to S0 in scipy
.25,2,.25 # alpha
-1,-1,0 # beta
-10,10,1 # x
1,0 # gamma, delta
2 # output file
"""
data = np.load(os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/stable-cdf-sample-data.npy')))
data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta')
tests = [
            # zolotarev is accurate for all values
['zolotarev', None, 8, None],
# fft accuracy poor, very poor alpha < 1
['fft', 0, 2, lambda r: r['alpha'] > 1],
]
for ix, (default_method, fft_min_points, decimal_places, filter_func) in enumerate(tests):
stats.levy_stable.pdf_default_method = default_method
stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
subdata = data[filter_func(data)] if filter_func is not None else data
with suppress_warnings() as sup:
sup.record(RuntimeWarning, 'FFT method is considered ' +
'experimental for cumulative distribution ' +
'function evaluations.*')
p = stats.levy_stable.cdf(subdata['x'], subdata['alpha'], subdata['beta'], scale=1, loc=0)
subdata2 = rec_append_fields(subdata, 'calc', p)
failures = subdata2[(np.abs(p-subdata['p']) >= 1.5*10.**(-decimal_places)) | np.isnan(p)]
assert_almost_equal(p, subdata['p'], decimal_places, "cdf test %s failed with method '%s'\n%s" % (ix, default_method, failures), verbose=False)
def test_pdf_alpha_equals_one_beta_non_zero(self):
""" sample points extracted from Tables and Graphs of Stable Probability
Density Functions - Donald R Holt - 1973 - p 187.
"""
xs = np.array([0, 0, 0, 0,
1, 1, 1, 1,
2, 2, 2, 2,
3, 3, 3, 3,
4, 4, 4, 4])
density = np.array([.3183, .3096, .2925, .2622,
.1591, .1587, .1599, .1635,
.0637, .0729, .0812, .0955,
.0318, .0390, .0458, .0586,
.0187, .0236, .0285, .0384])
betas = np.array([0, .25, .5, 1,
0, .25, .5, 1,
0, .25, .5, 1,
0, .25, .5, 1,
0, .25, .5, 1])
tests = [
['quadrature', None, 4],
#['fft', 0, 4],
['zolotarev', None, 1],
]
with np.errstate(all='ignore'), suppress_warnings() as sup:
sup.filter(category=RuntimeWarning, message="Density calculation unstable.*")
for default_method, fft_min_points, decimal_places in tests:
stats.levy_stable.pdf_default_method = default_method
stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
#stats.levy_stable.fft_grid_spacing = 0.0001
pdf = stats.levy_stable.pdf(xs, 1, betas, scale=1, loc=0)
assert_almost_equal(pdf, density, decimal_places, default_method)
def test_stats(self):
param_sets = [
[(1.48,-.22, 0, 1), (0,np.inf,np.NaN,np.NaN)],
[(2,.9, 10, 1.5), (10,4.5,0,0)]
]
for args, exp_stats in param_sets:
calc_stats = stats.levy_stable.stats(args[0], args[1], loc=args[2], scale=args[3], moments='mvsk')
assert_almost_equal(calc_stats, exp_stats)
class TestArrayArgument(object): # test for ticket:992
def setup_method(self):
np.random.seed(1234)
def test_noexception(self):
rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5),
size=(10, 5))
assert_equal(rvs.shape, (10, 5))
class TestDocstring(object):
def test_docstrings(self):
# See ticket #761
if stats.rayleigh.__doc__ is not None:
assert_("rayleigh" in stats.rayleigh.__doc__.lower())
if stats.bernoulli.__doc__ is not None:
assert_("bernoulli" in stats.bernoulli.__doc__.lower())
def test_no_name_arg(self):
# If name is not given, construction shouldn't fail. See #1508.
stats.rv_continuous()
stats.rv_discrete()
class TestEntropy(object):
def test_entropy_positive(self):
# See ticket #497
pk = [0.5, 0.2, 0.3]
qk = [0.1, 0.25, 0.65]
eself = stats.entropy(pk, pk)
edouble = stats.entropy(pk, qk)
assert_(0.0 == eself)
assert_(edouble >= 0.0)
def test_entropy_base(self):
pk = np.ones(16, float)
S = stats.entropy(pk, base=2.)
assert_(abs(S - 4.) < 1.e-5)
qk = np.ones(16, float)
qk[:8] = 2.
S = stats.entropy(pk, qk)
S2 = stats.entropy(pk, qk, base=2.)
assert_(abs(S/S2 - np.log(2.)) < 1.e-5)
def test_entropy_zero(self):
# Test for PR-479
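        # The input is normalized to [0, 1/3, 2/3] and the 0*log(0) term is
        # treated as 0, so the entropy is ln(3) - (2/3)*ln(2) ~ 0.6365142.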
assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278,
decimal=12)
def test_entropy_2d(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk),
[0.1933259, 0.18609809])
def test_entropy_2d_zero(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk),
[np.inf, 0.18609809])
pk[0][0] = 0.0
assert_array_almost_equal(stats.entropy(pk, qk),
[0.17403988, 0.18609809])
def test_entropy_base_2d_nondefault_axis(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
assert_array_almost_equal(stats.entropy(pk, axis=1),
[0.63651417, 0.63651417, 0.66156324])
def test_entropy_2d_nondefault_axis(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk, axis=1),
[0.231049, 0.231049, 0.127706])
def test_entropy_raises_value_error(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.1, 0.2], [0.6, 0.3]]
assert_raises(ValueError, stats.entropy, pk, qk)
def test_base_entropy_with_axis_0_is_equal_to_default(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
assert_array_almost_equal(stats.entropy(pk, axis=0),
stats.entropy(pk))
def test_entropy_with_axis_0_is_equal_to_default(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk, axis=0),
stats.entropy(pk, qk))
def test_base_entropy_transposed(self):
pk = np.array([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
assert_array_almost_equal(stats.entropy(pk.T).T,
stats.entropy(pk, axis=1))
def test_entropy_transposed(self):
pk = np.array([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
qk = np.array([[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]])
assert_array_almost_equal(stats.entropy(pk.T, qk.T).T,
stats.entropy(pk, qk, axis=1))
def test_argsreduce():
a = array([1, 3, 2, 1, 2, 3, 3])
b, c = argsreduce(a > 1, a, 2)
assert_array_equal(b, [3, 2, 2, 3, 3])
assert_array_equal(c, [2, 2, 2, 2, 2])
b, c = argsreduce(2 > 1, a, 2)
assert_array_equal(b, a[0])
assert_array_equal(c, [2])
b, c = argsreduce(a > 0, a, 2)
assert_array_equal(b, a)
assert_array_equal(c, [2] * numpy.size(a))
class TestFitMethod(object):
skip = ['ncf', 'ksone', 'kstwo']
def setup_method(self):
np.random.seed(1234)
    # Skip these because they are deprecated or have only loc and scale arguments.
fitSkipNonFinite = ['frechet_l', 'frechet_r', 'expon', 'norm', 'uniform', ]
@pytest.mark.parametrize('dist,args', distcont)
def test_fit_w_non_finite_data_values(self, dist, args):
"""gh-10300"""
if dist in self.fitSkipNonFinite:
pytest.skip("%s fit known to fail or deprecated" % dist)
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
y = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
distfunc = getattr(stats, dist)
assert_raises(RuntimeError, distfunc.fit, x, floc=0, fscale=1)
assert_raises(RuntimeError, distfunc.fit, y, floc=0, fscale=1)
def test_fix_fit_2args_lognorm(self):
# Regression test for #1551.
np.random.seed(12345)
with np.errstate(all='ignore'):
x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)
expected_shape = np.sqrt(((np.log(x) - np.log(20))**2).mean())
assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),
[expected_shape, 0, 20], atol=1e-8)
def test_fix_fit_norm(self):
x = np.arange(1, 6)
loc, scale = stats.norm.fit(x)
assert_almost_equal(loc, 3)
assert_almost_equal(scale, np.sqrt(2))
loc, scale = stats.norm.fit(x, floc=2)
assert_equal(loc, 2)
assert_equal(scale, np.sqrt(3))
loc, scale = stats.norm.fit(x, fscale=2)
assert_almost_equal(loc, 3)
assert_equal(scale, 2)
def test_fix_fit_gamma(self):
x = np.arange(1, 6)
meanlog = np.log(x).mean()
# A basic test of gamma.fit with floc=0.
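        # With loc fixed at 0, the profile MLE for the shape a solves
        # log(a) - digamma(a) = log(mean(x)) - mean(log(x)), with scale = mean(x)/a.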
floc = 0
a, loc, scale = stats.gamma.fit(x, floc=floc)
s = np.log(x.mean()) - meanlog
assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
# Regression tests for gh-2514.
# The problem was that if `floc=0` was given, any other fixed
# parameters were ignored.
f0 = 1
floc = 0
a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
assert_equal(a, f0)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
f0 = 2
floc = 0
a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
assert_equal(a, f0)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
# loc and scale fixed.
floc = 0
fscale = 2
a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)
assert_equal(loc, floc)
assert_equal(scale, fscale)
c = meanlog - np.log(fscale)
assert_almost_equal(special.digamma(a), c)
def test_fix_fit_beta(self):
# Test beta.fit when both floc and fscale are given.
def mlefunc(a, b, x):
# Zeros of this function are critical points of
# the maximum likelihood function.
n = len(x)
s1 = np.log(x).sum()
s2 = np.log(1-x).sum()
psiab = special.psi(a + b)
func = [s1 - n * (-psiab + special.psi(a)),
s2 - n * (-psiab + special.psi(b))]
return func
# Basic test with floc and fscale given.
x = np.array([0.125, 0.25, 0.5])
a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
assert_equal(loc, 0)
assert_equal(scale, 1)
assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6)
# Basic test with f0, floc and fscale given.
# This is also a regression test for gh-2514.
x = np.array([0.125, 0.25, 0.5])
a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)
assert_equal(a, 2)
assert_equal(loc, 0)
assert_equal(scale, 1)
da, db = mlefunc(a, b, x)
assert_allclose(db, 0, atol=1e-5)
# Same floc and fscale values as above, but reverse the data
# and fix b (f1).
x2 = 1 - x
a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)
assert_equal(b2, 2)
assert_equal(loc2, 0)
assert_equal(scale2, 1)
da, db = mlefunc(a2, b2, x2)
assert_allclose(da, 0, atol=1e-5)
# a2 of this test should equal b from above.
assert_almost_equal(a2, b)
# Check for detection of data out of bounds when floc and fscale
# are given.
assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)
y = np.array([0, .5, 1])
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)
# Check that attempting to fix all the parameters raises a ValueError.
assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,
floc=2, fscale=3)
def test_expon_fit(self):
x = np.array([2, 2, 4, 4, 4, 4, 4, 8])
loc, scale = stats.expon.fit(x)
assert_equal(loc, 2) # x.min()
assert_equal(scale, 2) # x.mean() - x.min()
loc, scale = stats.expon.fit(x, fscale=3)
assert_equal(loc, 2) # x.min()
assert_equal(scale, 3) # fscale
loc, scale = stats.expon.fit(x, floc=0)
assert_equal(loc, 0) # floc
assert_equal(scale, 4) # x.mean() - loc
def test_lognorm_fit(self):
x = np.array([1.5, 3, 10, 15, 23, 59])
lnxm1 = np.log(x - 1)
shape, loc, scale = stats.lognorm.fit(x, floc=1)
assert_allclose(shape, lnxm1.std(), rtol=1e-12)
assert_equal(loc, 1)
assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)
shape, loc, scale = stats.lognorm.fit(x, floc=1, fscale=6)
assert_allclose(shape, np.sqrt(((lnxm1 - np.log(6))**2).mean()),
rtol=1e-12)
assert_equal(loc, 1)
assert_equal(scale, 6)
shape, loc, scale = stats.lognorm.fit(x, floc=1, fix_s=0.75)
assert_equal(shape, 0.75)
assert_equal(loc, 1)
assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)
def test_uniform_fit(self):
x = np.array([1.0, 1.1, 1.2, 9.0])
loc, scale = stats.uniform.fit(x)
assert_equal(loc, x.min())
assert_equal(scale, x.ptp())
loc, scale = stats.uniform.fit(x, floc=0)
assert_equal(loc, 0)
assert_equal(scale, x.max())
loc, scale = stats.uniform.fit(x, fscale=10)
assert_equal(loc, 0)
assert_equal(scale, 10)
assert_raises(ValueError, stats.uniform.fit, x, floc=2.0)
assert_raises(ValueError, stats.uniform.fit, x, fscale=5.0)
def test_fshapes(self):
# take a beta distribution, with shapes='a, b', and make sure that
# fa is equivalent to f0, and fb is equivalent to f1
a, b = 3., 4.
x = stats.beta.rvs(a, b, size=100, random_state=1234)
res_1 = stats.beta.fit(x, f0=3.)
res_2 = stats.beta.fit(x, fa=3.)
assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
res_2 = stats.beta.fit(x, fix_a=3.)
assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
res_3 = stats.beta.fit(x, f1=4.)
res_4 = stats.beta.fit(x, fb=4.)
assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
res_4 = stats.beta.fit(x, fix_b=4.)
assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
# cannot specify both positional and named args at the same time
assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2)
# check that attempting to fix all parameters raises a ValueError
assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,
floc=2, fscale=3)
# check that specifying floc, fscale and fshapes works for
# beta and gamma which override the generic fit method
res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1)
aa, bb, ll, ss = res_5
assert_equal([aa, ll, ss], [3., 0, 1])
# gamma distribution
a = 3.
data = stats.gamma.rvs(a, size=100)
aa, ll, ss = stats.gamma.fit(data, fa=a)
assert_equal(aa, a)
def test_extra_params(self):
# unknown parameters should raise rather than be silently ignored
dist = stats.exponnorm
data = dist.rvs(K=2, size=100)
dct = dict(enikibeniki=-101)
assert_raises(TypeError, dist.fit, data, **dct)
class TestFrozen(object):
def setup_method(self):
np.random.seed(1234)
# Test that a frozen distribution gives the same results as the original
# object.
#
# Only tested for the normal distribution (with loc and scale specified)
# and for the gamma distribution (with a shape parameter specified).
def test_norm(self):
dist = stats.norm
frozen = stats.norm(loc=10.0, scale=3.0)
result_f = frozen.pdf(20.0)
result = dist.pdf(20.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.cdf(20.0)
result = dist.cdf(20.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.ppf(0.25)
result = dist.ppf(0.25, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.isf(0.25)
result = dist.isf(0.25, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.sf(10.0)
result = dist.sf(10.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.median()
result = dist.median(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.mean()
result = dist.mean(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.var()
result = dist.var(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.std()
result = dist.std(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.entropy()
result = dist.entropy(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.moment(2)
result = dist.moment(2, loc=10.0, scale=3.0)
assert_equal(result_f, result)
assert_equal(frozen.a, dist.a)
assert_equal(frozen.b, dist.b)
def test_gamma(self):
a = 2.0
dist = stats.gamma
frozen = stats.gamma(a)
result_f = frozen.pdf(20.0)
result = dist.pdf(20.0, a)
assert_equal(result_f, result)
result_f = frozen.cdf(20.0)
result = dist.cdf(20.0, a)
assert_equal(result_f, result)
result_f = frozen.ppf(0.25)
result = dist.ppf(0.25, a)
assert_equal(result_f, result)
result_f = frozen.isf(0.25)
result = dist.isf(0.25, a)
assert_equal(result_f, result)
result_f = frozen.sf(10.0)
result = dist.sf(10.0, a)
assert_equal(result_f, result)
result_f = frozen.median()
result = dist.median(a)
assert_equal(result_f, result)
result_f = frozen.mean()
result = dist.mean(a)
assert_equal(result_f, result)
result_f = frozen.var()
result = dist.var(a)
assert_equal(result_f, result)
result_f = frozen.std()
result = dist.std(a)
assert_equal(result_f, result)
result_f = frozen.entropy()
result = dist.entropy(a)
assert_equal(result_f, result)
result_f = frozen.moment(2)
result = dist.moment(2, a)
assert_equal(result_f, result)
assert_equal(frozen.a, frozen.dist.a)
assert_equal(frozen.b, frozen.dist.b)
def test_regression_ticket_1293(self):
# Create a frozen distribution.
frozen = stats.lognorm(1)
# Call one of its methods that does not take any keyword arguments.
m1 = frozen.moment(2)
# Now call a method that takes a keyword argument.
frozen.stats(moments='mvsk')
# Call moment(2) again.
# After calling stats(), the following was raising an exception.
# So this test passes if the following does not raise an exception.
m2 = frozen.moment(2)
# The following should also be true, of course. But it is not
# the focus of this test.
assert_equal(m1, m2)
def test_ab(self):
# test that the support of a frozen distribution
# (i) remains frozen even if it changes for the original one
# (ii) is actually correct if the shape parameters are such that
# the values of [a, b] are not the default [0, inf]
# take a genpareto as an example where the support
# depends on the value of the shape parameter:
# for c > 0: a, b = 0, inf
# for c < 0: a, b = 0, -1/c
c = -0.1
rv = stats.genpareto(c=c)
a, b = rv.dist._get_support(c)
assert_equal([a, b], [0., 10.])
c = 0.1
stats.genpareto.pdf(0, c=c)
assert_equal(rv.dist._get_support(c), [0, np.inf])
c = -0.1
rv = stats.genpareto(c=c)
a, b = rv.dist._get_support(c)
assert_equal([a, b], [0., 10.])
c = 0.1
stats.genpareto.pdf(0, c) # this should NOT change genpareto.b
assert_equal((rv.dist.a, rv.dist.b), stats.genpareto._get_support(c))
rv1 = stats.genpareto(c=0.1)
assert_(rv1.dist is not rv.dist)
# c >= 0: a, b = [0, inf]
for c in [1., 0.]:
c = np.asarray(c)
rv = stats.genpareto(c=c)
a, b = rv.a, rv.b
assert_equal(a, 0.)
assert_(np.isposinf(b))
# c < 0: a=0, b=1/|c|
c = np.asarray(-2.)
a, b = stats.genpareto._get_support(c)
assert_allclose([a, b], [0., 0.5])
def test_rv_frozen_in_namespace(self):
# Regression test for gh-3522
assert_(hasattr(stats.distributions, 'rv_frozen'))
def test_random_state(self):
# only check that the random_state attribute exists,
frozen = stats.norm()
assert_(hasattr(frozen, 'random_state'))
# ... that it can be set,
frozen.random_state = 42
assert_equal(frozen.random_state.get_state(),
np.random.RandomState(42).get_state())
# ... and that .rvs method accepts it as an argument
rndm = np.random.RandomState(1234)
frozen.rvs(size=8, random_state=rndm)
def test_pickling(self):
# test that a frozen instance pickles and unpickles
# (this method is a clone of common_tests.check_pickling)
beta = stats.beta(2.3098496451481823, 0.62687954300963677)
poiss = stats.poisson(3.)
sample = stats.rv_discrete(values=([0, 1, 2, 3],
[0.1, 0.2, 0.3, 0.4]))
for distfn in [beta, poiss, sample]:
distfn.random_state = 1234
distfn.rvs(size=8)
s = pickle.dumps(distfn)
r0 = distfn.rvs(size=8)
unpickled = pickle.loads(s)
r1 = unpickled.rvs(size=8)
assert_equal(r0, r1)
# also smoke test some methods
medians = [distfn.ppf(0.5), unpickled.ppf(0.5)]
assert_equal(medians[0], medians[1])
assert_equal(distfn.cdf(medians[0]),
unpickled.cdf(medians[1]))
def test_expect(self):
# smoke test the expect method of the frozen distribution
# only take a gamma w/loc and scale and poisson with loc specified
def func(x):
return x
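        # With conditional=True, expect() divides by the probability mass of
        # the interval [lb, ub], so the frozen and non-frozen calls below must
        # agree on the same conditional expectation.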
gm = stats.gamma(a=2, loc=3, scale=4)
gm_val = gm.expect(func, lb=1, ub=2, conditional=True)
gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,
lb=1, ub=2, conditional=True)
assert_allclose(gm_val, gamma_val)
p = stats.poisson(3, loc=4)
p_val = p.expect(func)
poisson_val = stats.poisson.expect(func, args=(3,), loc=4)
assert_allclose(p_val, poisson_val)
class TestExpect(object):
# Test for expect method.
#
# Uses normal distribution and beta distribution for finite bounds, and
# hypergeom for discrete distribution with finite support
def test_norm(self):
v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)
assert_almost_equal(v, 4, decimal=14)
        m = stats.norm.expect(lambda x: x, loc=5, scale=2)
assert_almost_equal(m, 5, decimal=14)
lb = stats.norm.ppf(0.05, loc=5, scale=2)
ub = stats.norm.ppf(0.95, loc=5, scale=2)
prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)
assert_almost_equal(prob90, 0.9, decimal=14)
prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,
conditional=True)
assert_almost_equal(prob90c, 1., decimal=14)
def test_beta(self):
# case with finite support interval
v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5),
loc=5, scale=2)
assert_almost_equal(v, 1./18., decimal=13)
m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.)
assert_almost_equal(m, 19/3., decimal=13)
ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)
lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)
prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5.,
scale=2., lb=lb, ub=ub, conditional=False)
assert_almost_equal(prob90, 0.9, decimal=13)
prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5,
scale=2, lb=lb, ub=ub, conditional=True)
assert_almost_equal(prob90c, 1., decimal=13)
def test_hypergeom(self):
# test case with finite bounds
# without specifying bounds
m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)
m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)
assert_almost_equal(m, m_true, decimal=13)
v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
loc=5.)
assert_almost_equal(v, v_true, decimal=14)
# with bounds, bounds equal to shifted support
v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2,
args=(20, 10, 8),
loc=5., lb=5, ub=13)
assert_almost_equal(v_bounds, v_true, decimal=14)
# drop boundary points
prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
loc=5., lb=6, ub=12)
assert_almost_equal(prob_bounds, prob_true, decimal=13)
# conditional
prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
lb=6, ub=12, conditional=True)
assert_almost_equal(prob_bc, 1, decimal=14)
# check simple integral
prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
lb=0, ub=8)
assert_almost_equal(prob_b, 1, decimal=13)
def test_poisson(self):
# poisson, use lower bound only
prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
conditional=False)
prob_b_true = 1-stats.poisson.cdf(2, 2)
assert_almost_equal(prob_bounds, prob_b_true, decimal=14)
prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
conditional=True)
assert_almost_equal(prob_lb, 1, decimal=14)
def test_genhalflogistic(self):
# genhalflogistic, changes upper bound of support in _argcheck
# regression test for gh-2622
halflog = stats.genhalflogistic
# check consistency when calling expect twice with the same input
res1 = halflog.expect(args=(1.5,))
halflog.expect(args=(0.5,))
res2 = halflog.expect(args=(1.5,))
assert_almost_equal(res1, res2, decimal=14)
def test_rice_overflow(self):
        # rice.pdf(999, 0.74) was inf since special.i0 silently overflows
# check that using i0e fixes it
assert_(np.isfinite(stats.rice.pdf(999, 0.74)))
assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))
def test_logser(self):
# test a discrete distribution with infinite support and loc
p, loc = 0.3, 3
res_0 = stats.logser.expect(lambda k: k, args=(p,))
# check against the correct answer (sum of a geom series)
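        # For the log-series distribution, E[k] = -p / ((1 - p) * log(1 - p)),
        # which is the same expression as p / (p - 1) / log(1 - p) used below.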
assert_allclose(res_0,
p / (p - 1.) / np.log(1. - p), atol=1e-15)
# now check it with `loc`
res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc)
assert_allclose(res_l, res_0 + loc, atol=1e-15)
def test_skellam(self):
# Use a discrete distribution w/ bi-infinite support. Compute two first
# moments and compare to known values (cf skellam.stats)
p1, p2 = 18, 22
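        # For Skellam(mu1, mu2) the mean is mu1 - mu2 and the variance is
        # mu1 + mu2, which is what the two assertions below check.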
m1 = stats.skellam.expect(lambda x: x, args=(p1, p2))
m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2))
assert_allclose(m1, p1 - p2, atol=1e-12)
assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12)
def test_randint(self):
# Use a discrete distribution w/ parameter-dependent support, which
# is larger than the default chunksize
lo, hi = 0, 113
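        # randint(lo, hi) is uniform on the integers lo, ..., hi - 1, so the
        # mean is the average of range(lo, hi), i.e. (lo + hi - 1) / 2.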
res = stats.randint.expect(lambda x: x, (lo, hi))
assert_allclose(res,
sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15)
def test_zipf(self):
# Test that there is no infinite loop even if the sum diverges
assert_warns(RuntimeWarning, stats.zipf.expect,
lambda x: x**2, (2,))
def test_discrete_kwds(self):
# check that discrete expect accepts keywords to control the summation
n0 = stats.poisson.expect(lambda x: 1, args=(2,))
n1 = stats.poisson.expect(lambda x: 1, args=(2,),
maxcount=1001, chunksize=32, tolerance=1e-8)
assert_almost_equal(n0, n1, decimal=14)
def test_moment(self):
# test the .moment() method: compute a higher moment and compare to
# a known value
def poiss_moment5(mu):
return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu
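        # poiss_moment5 is the fifth raw moment of the Poisson distribution;
        # the coefficients are the Stirling numbers of the second kind S(5, k).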
for mu in [5, 7]:
m5 = stats.poisson.moment(5, mu)
assert_allclose(m5, poiss_moment5(mu), rtol=1e-10)
class TestNct(object):
def test_nc_parameter(self):
# Parameter values c<=0 were not enabled (gh-2402).
        # For negative values of c and for c=0, the results of rv.cdf(0) below were nan
rv = stats.nct(5, 0)
assert_equal(rv.cdf(0), 0.5)
rv = stats.nct(5, -1)
assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)
def test_broadcasting(self):
res = stats.nct.pdf(5, np.arange(4, 7)[:, None],
np.linspace(0.1, 1, 4))
expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
[0.00217142, 0.00395366, 0.00683888, 0.01126276],
[0.00153078, 0.00291093, 0.00525206, 0.00900815]])
assert_allclose(res, expected, rtol=1e-5)
def test_variance_gh_issue_2401(self):
# Computation of the variance of a non-central t-distribution resulted
# in a TypeError: ufunc 'isinf' not supported for the input types,
# and the inputs could not be safely coerced to any supported types
# according to the casting rule 'safe'
rv = stats.nct(4, 0)
assert_equal(rv.var(), 2.0)
def test_nct_inf_moments(self):
# n-th moment of nct only exists for df > n
m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')
assert_(np.isfinite(m))
assert_equal([v, s, k], [np.inf, np.nan, np.nan])
m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')
assert_(np.isfinite([m, v, s]).all())
assert_equal(k, np.nan)
class TestRice(object):
def test_rice_zero_b(self):
# rice distribution should work with b=0, cf gh-2164
x = [0.2, 1., 5.]
assert_(np.isfinite(stats.rice.pdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.logpdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.cdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.logcdf(x, b=0.)).all())
q = [0.1, 0.1, 0.5, 0.9]
assert_(np.isfinite(stats.rice.ppf(q, b=0.)).all())
mvsk = stats.rice.stats(0, moments='mvsk')
assert_(np.isfinite(mvsk).all())
# furthermore, pdf is continuous as b\to 0
# rice.pdf(x, b\to 0) = x exp(-x^2/2) + O(b^2)
        # see e.g. Abramowitz & Stegun 9.6.7 & 9.6.10
b = 1e-8
assert_allclose(stats.rice.pdf(x, 0), stats.rice.pdf(x, b),
atol=b, rtol=0)
def test_rice_rvs(self):
rvs = stats.rice.rvs
assert_equal(rvs(b=3.).size, 1)
assert_equal(rvs(b=3., size=(3, 5)).shape, (3, 5))
class TestErlang(object):
def setup_method(self):
np.random.seed(1234)
def test_erlang_runtimewarning(self):
# erlang should generate a RuntimeWarning if a non-integer
# shape parameter is used.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
# The non-integer shape parameter 1.3 should trigger a
# RuntimeWarning
assert_raises(RuntimeWarning,
stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)
# Calling the fit method with `f0` set to an integer should
# *not* trigger a RuntimeWarning. It should return the same
# values as gamma.fit(...).
data = [0.5, 1.0, 2.0, 4.0]
result_erlang = stats.erlang.fit(data, f0=1)
result_gamma = stats.gamma.fit(data, f0=1)
assert_allclose(result_erlang, result_gamma, rtol=1e-3)
def test_gh_pr_10949_argcheck(self):
assert_equal(stats.erlang.pdf(0.5, a=[1, -1]), stats.gamma.pdf(0.5, a=[1, -1]))
class TestRayleigh(object):
# gh-6227
def test_logpdf(self):
y = stats.rayleigh.logpdf(50)
assert_allclose(y, -1246.0879769945718)
def test_logsf(self):
y = stats.rayleigh.logsf(50)
assert_allclose(y, -1250)
class TestExponWeib(object):
def test_pdf_logpdf(self):
# Regression test for gh-3508.
x = 0.1
a = 1.0
c = 100.0
p = stats.exponweib.pdf(x, a, c)
logp = stats.exponweib.logpdf(x, a, c)
# Expected values were computed with mpmath.
assert_allclose([p, logp],
[1.0000000000000054e-97, -223.35075402042244])
def test_a_is_1(self):
# For issue gh-3508.
# Check that when a=1, the pdf and logpdf methods of exponweib are the
# same as those of weibull_min.
x = np.logspace(-4, -1, 4)
a = 1
c = 100
p = stats.exponweib.pdf(x, a, c)
expected = stats.weibull_min.pdf(x, c)
assert_allclose(p, expected)
logp = stats.exponweib.logpdf(x, a, c)
expected = stats.weibull_min.logpdf(x, c)
assert_allclose(logp, expected)
def test_a_is_1_c_is_1(self):
# When a = 1 and c = 1, the distribution is exponential.
x = np.logspace(-8, 1, 10)
a = 1
c = 1
p = stats.exponweib.pdf(x, a, c)
expected = stats.expon.pdf(x)
assert_allclose(p, expected)
logp = stats.exponweib.logpdf(x, a, c)
expected = stats.expon.logpdf(x)
assert_allclose(logp, expected)
class TestWeibull(object):
def test_logpdf(self):
# gh-6217
y = stats.weibull_min.logpdf(0, 1)
assert_equal(y, 0)
def test_with_maxima_distrib(self):
# Tests for weibull_min and weibull_max.
# The expected values were computed using the symbolic algebra
# program 'maxima' with the package 'distrib', which has
# 'pdf_weibull' and 'cdf_weibull'. The mapping between the
# scipy and maxima functions is as follows:
# -----------------------------------------------------------------
# scipy maxima
# --------------------------------- ------------------------------
# weibull_min.pdf(x, a, scale=b) pdf_weibull(x, a, b)
# weibull_min.logpdf(x, a, scale=b) log(pdf_weibull(x, a, b))
# weibull_min.cdf(x, a, scale=b) cdf_weibull(x, a, b)
# weibull_min.logcdf(x, a, scale=b) log(cdf_weibull(x, a, b))
# weibull_min.sf(x, a, scale=b) 1 - cdf_weibull(x, a, b)
# weibull_min.logsf(x, a, scale=b) log(1 - cdf_weibull(x, a, b))
#
# weibull_max.pdf(x, a, scale=b) pdf_weibull(-x, a, b)
# weibull_max.logpdf(x, a, scale=b) log(pdf_weibull(-x, a, b))
# weibull_max.cdf(x, a, scale=b) 1 - cdf_weibull(-x, a, b)
# weibull_max.logcdf(x, a, scale=b) log(1 - cdf_weibull(-x, a, b))
# weibull_max.sf(x, a, scale=b) cdf_weibull(-x, a, b)
# weibull_max.logsf(x, a, scale=b) log(cdf_weibull(-x, a, b))
# -----------------------------------------------------------------
x = 1.5
a = 2.0
b = 3.0
# weibull_min
p = stats.weibull_min.pdf(x, a, scale=b)
assert_allclose(p, np.exp(-0.25)/3)
lp = stats.weibull_min.logpdf(x, a, scale=b)
assert_allclose(lp, -0.25 - np.log(3))
c = stats.weibull_min.cdf(x, a, scale=b)
assert_allclose(c, -special.expm1(-0.25))
lc = stats.weibull_min.logcdf(x, a, scale=b)
assert_allclose(lc, np.log(-special.expm1(-0.25)))
s = stats.weibull_min.sf(x, a, scale=b)
assert_allclose(s, np.exp(-0.25))
ls = stats.weibull_min.logsf(x, a, scale=b)
assert_allclose(ls, -0.25)
# Also test using a large value x, for which computing the survival
# function using the CDF would result in 0.
s = stats.weibull_min.sf(30, 2, scale=3)
assert_allclose(s, np.exp(-100))
ls = stats.weibull_min.logsf(30, 2, scale=3)
assert_allclose(ls, -100)
# weibull_max
x = -1.5
p = stats.weibull_max.pdf(x, a, scale=b)
assert_allclose(p, np.exp(-0.25)/3)
lp = stats.weibull_max.logpdf(x, a, scale=b)
assert_allclose(lp, -0.25 - np.log(3))
c = stats.weibull_max.cdf(x, a, scale=b)
assert_allclose(c, np.exp(-0.25))
lc = stats.weibull_max.logcdf(x, a, scale=b)
assert_allclose(lc, -0.25)
s = stats.weibull_max.sf(x, a, scale=b)
assert_allclose(s, -special.expm1(-0.25))
ls = stats.weibull_max.logsf(x, a, scale=b)
assert_allclose(ls, np.log(-special.expm1(-0.25)))
# Also test using a value of x close to 0, for which computing the
# survival function using the CDF would result in 0.
s = stats.weibull_max.sf(-1e-9, 2, scale=3)
assert_allclose(s, -special.expm1(-1/9000000000000000000))
ls = stats.weibull_max.logsf(-1e-9, 2, scale=3)
assert_allclose(ls, np.log(-special.expm1(-1/9000000000000000000)))
class TestRdist(object):
def test_rdist_cdf_gh1285(self):
# check workaround in rdist._cdf for issue gh-1285.
distfn = stats.rdist
values = [0.001, 0.5, 0.999]
assert_almost_equal(distfn.cdf(distfn.ppf(values, 541.0), 541.0),
values, decimal=5)
def test_rdist_beta(self):
# rdist is a special case of stats.beta
x = np.linspace(-0.99, 0.99, 10)
c = 2.7
assert_almost_equal(0.5*stats.beta(c/2, c/2).pdf((x + 1)/2),
stats.rdist(c).pdf(x))
class TestTrapz(object):
def test_reduces_to_triang(self):
modes = [0, 0.3, 0.5, 1]
for mode in modes:
x = [0, mode, 1]
assert_almost_equal(stats.trapz.pdf(x, mode, mode),
stats.triang.pdf(x, mode))
assert_almost_equal(stats.trapz.cdf(x, mode, mode),
stats.triang.cdf(x, mode))
def test_reduces_to_uniform(self):
x = np.linspace(0, 1, 10)
assert_almost_equal(stats.trapz.pdf(x, 0, 1), stats.uniform.pdf(x))
assert_almost_equal(stats.trapz.cdf(x, 0, 1), stats.uniform.cdf(x))
def test_cases(self):
# edge cases
assert_almost_equal(stats.trapz.pdf(0, 0, 0), 2)
assert_almost_equal(stats.trapz.pdf(1, 1, 1), 2)
assert_almost_equal(stats.trapz.pdf(0.5, 0, 0.8),
1.11111111111111111)
assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 1.0),
1.11111111111111111)
# straightforward case
assert_almost_equal(stats.trapz.pdf(0.1, 0.2, 0.8), 0.625)
assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 0.8), 1.25)
assert_almost_equal(stats.trapz.pdf(0.9, 0.2, 0.8), 0.625)
assert_almost_equal(stats.trapz.cdf(0.1, 0.2, 0.8), 0.03125)
assert_almost_equal(stats.trapz.cdf(0.2, 0.2, 0.8), 0.125)
assert_almost_equal(stats.trapz.cdf(0.5, 0.2, 0.8), 0.5)
assert_almost_equal(stats.trapz.cdf(0.9, 0.2, 0.8), 0.96875)
assert_almost_equal(stats.trapz.cdf(1.0, 0.2, 0.8), 1.0)
def test_trapz_vect(self):
# test that array-valued shapes and arguments are handled
c = np.array([0.1, 0.2, 0.3])
d = np.array([0.5, 0.6])[:, None]
x = np.array([0.15, 0.25, 0.9])
v = stats.trapz.pdf(x, c, d)
cc, dd, xx = np.broadcast_arrays(c, d, x)
res = np.empty(xx.size, dtype=xx.dtype)
ind = np.arange(xx.size)
for i, x1, c1, d1 in zip(ind, xx.ravel(), cc.ravel(), dd.ravel()):
res[i] = stats.trapz.pdf(x1, c1, d1)
assert_allclose(v, res.reshape(v.shape), atol=1e-15)
class TestTriang(object):
def test_edge_cases(self):
with np.errstate(all='raise'):
assert_equal(stats.triang.pdf(0, 0), 2.)
assert_equal(stats.triang.pdf(0.5, 0), 1.)
assert_equal(stats.triang.pdf(1, 0), 0.)
assert_equal(stats.triang.pdf(0, 1), 0)
assert_equal(stats.triang.pdf(0.5, 1), 1.)
assert_equal(stats.triang.pdf(1, 1), 2)
assert_equal(stats.triang.cdf(0., 0.), 0.)
assert_equal(stats.triang.cdf(0.5, 0.), 0.75)
assert_equal(stats.triang.cdf(1.0, 0.), 1.0)
assert_equal(stats.triang.cdf(0., 1.), 0.)
assert_equal(stats.triang.cdf(0.5, 1.), 0.25)
assert_equal(stats.triang.cdf(1., 1.), 1)
class TestMielke(object):
def test_moments(self):
k, s = 4.642, 0.597
# n-th moment exists only if n < s
assert_equal(stats.mielke(k, s).moment(1), np.inf)
assert_equal(stats.mielke(k, 1.0).moment(1), np.inf)
assert_(np.isfinite(stats.mielke(k, 1.01).moment(1)))
def test_burr_equivalence(self):
x = np.linspace(0.01, 100, 50)
k, s = 2.45, 5.32
assert_allclose(stats.burr.pdf(x, s, k/s), stats.mielke.pdf(x, k, s))
class TestBurr(object):
def test_endpoints_7491(self):
# gh-7491
# Compute the pdf at the left endpoint dst.a.
data = [
[stats.fisk, (1,), 1],
[stats.burr, (0.5, 2), 1],
[stats.burr, (1, 1), 1],
[stats.burr, (2, 0.5), 1],
[stats.burr12, (1, 0.5), 0.5],
[stats.burr12, (1, 1), 1.0],
[stats.burr12, (1, 2), 2.0]]
ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
correct = [_correct_ for _f, _args, _correct_ in data]
assert_array_almost_equal(ans, correct)
ans = [_f.logpdf(_f.a, *_args) for _f, _args, _ in data]
correct = [np.log(_correct_) for _f, _args, _correct_ in data]
assert_array_almost_equal(ans, correct)
def test_burr_stats_9544(self):
# gh-9544. Test from gh-9978
c, d = 5.0, 3
mean, variance = stats.burr(c, d).stats()
# mean = sc.beta(3 + 1/5, 1. - 1/5) * 3 = 1.4110263...
# var = sc.beta(3 + 2 / 5, 1. - 2 / 5) * 3 - (sc.beta(3 + 1 / 5, 1. - 1 / 5) * 3) ** 2
mean_hc, variance_hc = 1.4110263183925857, 0.22879948026191643
assert_allclose(mean, mean_hc)
assert_allclose(variance, variance_hc)
def test_burr_nan_mean_var_9544(self):
# gh-9544. Test from gh-9978
c, d = 0.5, 3
mean, variance = stats.burr(c, d).stats()
assert_(np.isnan(mean))
assert_(np.isnan(variance))
c, d = 1.5, 3
mean, variance = stats.burr(c, d).stats()
assert_(np.isfinite(mean))
assert_(np.isnan(variance))
c, d = 0.5, 3
e1, e2, e3, e4 = stats.burr._munp(np.array([1, 2, 3, 4]), c, d)
assert_(np.isnan(e1))
assert_(np.isnan(e2))
assert_(np.isnan(e3))
assert_(np.isnan(e4))
c, d = 1.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isnan(e2))
assert_(np.isnan(e3))
assert_(np.isnan(e4))
c, d = 2.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isfinite(e2))
assert_(np.isnan(e3))
assert_(np.isnan(e4))
c, d = 3.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isfinite(e2))
assert_(np.isfinite(e3))
assert_(np.isnan(e4))
c, d = 4.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isfinite(e2))
assert_(np.isfinite(e3))
assert_(np.isfinite(e4))
def test_540_567():
# test for nan returned in tickets 540, 567
assert_almost_equal(stats.norm.cdf(-1.7624320982), 0.03899815971089126,
decimal=10, err_msg='test_540_567')
assert_almost_equal(stats.norm.cdf(-1.7624320983), 0.038998159702449846,
decimal=10, err_msg='test_540_567')
assert_almost_equal(stats.norm.cdf(1.38629436112, loc=0.950273420309,
scale=0.204423758009),
0.98353464004309321,
decimal=10, err_msg='test_540_567')
def test_regression_ticket_1316():
# The following was raising an exception, because _construct_default_doc()
# did not handle the default keyword extradoc=None. See ticket #1316.
stats._continuous_distns.gamma_gen(name='gamma')
def test_regression_ticket_1326():
# adjust to avoid nan with 0*log(0)
assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14)
def test_regression_tukey_lambda():
# Make sure that Tukey-Lambda distribution correctly handles
# non-positive lambdas.
x = np.linspace(-5.0, 5.0, 101)
olderr = np.seterr(divide='ignore')
try:
for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:
p = stats.tukeylambda.pdf(x, lam)
assert_((p != 0.0).all())
assert_(~np.isnan(p).all())
lam = np.array([[-1.0], [0.0], [2.0]])
p = stats.tukeylambda.pdf(x, lam)
finally:
np.seterr(**olderr)
assert_(~np.isnan(p).all())
assert_((p[0] != 0.0).all())
assert_((p[1] != 0.0).all())
assert_((p[2] != 0.0).any())
assert_((p[2] == 0.0).any())
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
def test_regression_ticket_1421():
assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)
assert_('pmf(x,' in stats.poisson.__doc__)
def test_nan_arguments_gh_issue_1362():
with np.errstate(invalid='ignore'):
assert_(np.isnan(stats.t.logcdf(1, np.nan)))
assert_(np.isnan(stats.t.cdf(1, np.nan)))
assert_(np.isnan(stats.t.logsf(1, np.nan)))
assert_(np.isnan(stats.t.sf(1, np.nan)))
assert_(np.isnan(stats.t.pdf(1, np.nan)))
assert_(np.isnan(stats.t.logpdf(1, np.nan)))
assert_(np.isnan(stats.t.ppf(1, np.nan)))
assert_(np.isnan(stats.t.isf(1, np.nan)))
assert_(np.isnan(stats.bernoulli.logcdf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.cdf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.logsf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.sf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.pmf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.logpmf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.ppf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.isf(np.nan, 0.5)))
def test_frozen_fit_ticket_1536():
np.random.seed(5678)
true = np.array([0.25, 0., 0.5])
x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)
olderr = np.seterr(divide='ignore')
try:
params = np.array(stats.lognorm.fit(x, floc=0.))
finally:
np.seterr(**olderr)
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0))
assert_almost_equal(params, true, decimal=2)
np.random.seed(5678)
loc = 1
floc = 0.9
x = stats.norm.rvs(loc, 2., size=100)
params = np.array(stats.norm.fit(x, floc=floc))
expected = np.array([floc, np.sqrt(((x-floc)**2).mean())])
assert_almost_equal(params, expected, decimal=4)
def test_regression_ticket_1530():
# Check the starting value works for Cauchy distribution fit.
np.random.seed(654321)
rvs = stats.cauchy.rvs(size=100)
params = stats.cauchy.fit(rvs)
expected = (0.045, 1.142)
assert_almost_equal(params, expected, decimal=1)
def test_gh_pr_4806():
# Check starting values for Cauchy distribution fit.
np.random.seed(1234)
x = np.random.randn(42)
for offset in 10000.0, 1222333444.0:
loc, scale = stats.cauchy.fit(x + offset)
assert_allclose(loc, offset, atol=1.0)
assert_allclose(scale, 0.6, atol=1.0)
def test_tukeylambda_stats_ticket_1545():
# Some test for the variance and kurtosis of the Tukey Lambda distr.
# See test_tukeylamdba_stats.py for more tests.
mv = stats.tukeylambda.stats(0, moments='mvsk')
# Known exact values:
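    # For lam = 0 the Tukey lambda distribution reduces to the logistic
    # distribution, with variance pi**2/3 and excess kurtosis 1.2.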
expected = [0, np.pi**2/3, 0, 1.2]
assert_almost_equal(mv, expected, decimal=10)
mv = stats.tukeylambda.stats(3.13, moments='mvsk')
# 'expected' computed with mpmath.
expected = [0, 0.0269220858861465102, 0, -0.898062386219224104]
assert_almost_equal(mv, expected, decimal=10)
mv = stats.tukeylambda.stats(0.14, moments='mvsk')
# 'expected' computed with mpmath.
expected = [0, 2.11029702221450250, 0, -0.02708377353223019456]
assert_almost_equal(mv, expected, decimal=10)
def test_poisson_logpmf_ticket_1436():
assert_(np.isfinite(stats.poisson.logpmf(1500, 200)))
def test_powerlaw_stats():
"""Test the powerlaw stats function.
This unit test is also a regression test for ticket 1548.
The exact values are:
mean:
mu = a / (a + 1)
variance:
sigma**2 = a / ((a + 2) * (a + 1) ** 2)
skewness:
One formula (see https://en.wikipedia.org/wiki/Skewness) is
gamma_1 = (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3
A short calculation shows that E[X**k] is a / (a + k), so gamma_1
can be implemented as
n = a/(a+3) - 3*(a/(a+1))*a/(a+2) + 2*(a/(a+1))**3
d = sqrt(a/((a+2)*(a+1)**2)) ** 3
gamma_1 = n/d
Either by simplifying, or by a direct calculation of mu_3 / sigma**3,
one gets the more concise formula:
gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)
kurtosis: (See https://en.wikipedia.org/wiki/Kurtosis)
The excess kurtosis is
gamma_2 = mu_4 / sigma**4 - 3
A bit of calculus and algebra (sympy helps) shows that
mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4))
so
gamma_2 = 3*(3*a**2 - a + 2) * (a+2) / (a*(a+3)*(a+4)) - 3
which can be rearranged to
gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))
"""
cases = [(1.0, (0.5, 1./12, 0.0, -1.2)),
(2.0, (2./3, 2./36, -0.56568542494924734, -0.6))]
for a, exact_mvsk in cases:
mvsk = stats.powerlaw.stats(a, moments="mvsk")
assert_array_almost_equal(mvsk, exact_mvsk)
def test_powerlaw_edge():
# Regression test for gh-3986.
p = stats.powerlaw.logpdf(0, 1)
assert_equal(p, 0.0)
def test_exponpow_edge():
# Regression test for gh-3982.
p = stats.exponpow.logpdf(0, 1)
assert_equal(p, 0.0)
# Check pdf and logpdf at x = 0 for other values of b.
p = stats.exponpow.pdf(0, [0.25, 1.0, 1.5])
assert_equal(p, [np.inf, 1.0, 0.0])
p = stats.exponpow.logpdf(0, [0.25, 1.0, 1.5])
assert_equal(p, [np.inf, 0.0, -np.inf])
def test_gengamma_edge():
# Regression test for gh-3985.
p = stats.gengamma.pdf(0, 1, 1)
assert_equal(p, 1.0)
# Regression tests for gh-4724.
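    # With c = 1 gengamma is the gamma distribution, whose negative moment
    # E[X**-2] equals 1 / ((a - 1) * (a - 2)).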
p = stats.gengamma._munp(-2, 200, 1.)
assert_almost_equal(p, 1./199/198)
p = stats.gengamma._munp(-2, 10, 1.)
assert_almost_equal(p, 1./9/8)
def test_ksone_fit_freeze():
# Regression test for ticket #1638.
d = np.array(
[-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,
-0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,
0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,
0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,
0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,
0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,
-0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,
-0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,
-0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,
-0.06037974, 0.37670779, -0.21684405])
try:
olderr = np.seterr(invalid='ignore')
with suppress_warnings() as sup:
sup.filter(IntegrationWarning,
"The maximum number of subdivisions .50. has been "
"achieved.")
sup.filter(RuntimeWarning,
"floating point number truncated to an integer")
stats.ksone.fit(d)
finally:
np.seterr(**olderr)
def test_norm_logcdf():
# Test precision of the logcdf of the normal distribution.
# This precision was enhanced in ticket 1614.
x = -np.asarray(list(range(0, 120, 4)))
# Values from R
expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,
-131.69539607, -203.91715537, -292.09872100, -396.25241451,
-516.38564863, -652.50322759, -804.60844201, -972.70364403,
-1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,
-2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,
-3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,
-4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,
-6277.63751711, -6733.67260303]
assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)
# also test the complex-valued code path
assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8)
# test the accuracy: d(logcdf)/dx = pdf / cdf \equiv exp(logpdf - logcdf)
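    # The derivative is evaluated by complex-step differentiation:
    # d(logcdf)/dx ~= Im(logcdf(x + i*h)) / h for a small step h.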
deriv = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag
deriv_expected = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x))
assert_allclose(deriv, deriv_expected, atol=1e-10)
def test_levy_cdf_ppf():
# Test levy.cdf, including small arguments.
x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])
# Expected values were calculated separately with mpmath.
# E.g.
# >>> mpmath.mp.dps = 100
# >>> x = mpmath.mp.mpf('0.01')
# >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x)))
expected = np.array([0.9747728793699604,
0.3173105078629141,
0.1572992070502851,
0.0015654022580025495,
1.523970604832105e-23,
1.795832784800726e-219])
y = stats.levy.cdf(x)
assert_allclose(y, expected, rtol=1e-10)
# ppf(expected) should get us back to x.
xx = stats.levy.ppf(expected)
assert_allclose(xx, x, rtol=1e-13)
def test_hypergeom_interval_1802():
# these two had endless loops
assert_equal(stats.hypergeom.interval(.95, 187601, 43192, 757),
(152.0, 197.0))
assert_equal(stats.hypergeom.interval(.945, 187601, 43192, 757),
(152.0, 197.0))
# this was working also before
assert_equal(stats.hypergeom.interval(.94, 187601, 43192, 757),
(153.0, 196.0))
# degenerate case .a == .b
assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)
assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)
def test_distribution_too_many_args():
np.random.seed(1234)
# Check that a TypeError is raised when too many args are given to a method
# Regression test for ticket 1815.
x = np.linspace(0.1, 0.7, num=5)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)
# These should not give errors
stats.gamma.pdf(x, 2, 3) # loc=3
stats.gamma.pdf(x, 2, 3, 4) # loc=3, scale=4
stats.gamma.stats(2., 3)
stats.gamma.stats(2., 3, 4)
stats.gamma.stats(2., 3, 4, 'mv')
stats.gamma.rvs(2., 3, 4, 5)
stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)
# Also for a discrete distribution
stats.geom.pmf(x, 2, loc=3) # no error, loc=3
assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)
assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)
# And for distributions with 0, 2 and 3 args respectively
assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)
assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)
assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)
assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)
assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)
stats.ncf.pdf(x, 3, 4, 5, 6, 1.0) # 3 args, plus loc/scale
def test_ncx2_tails_ticket_955():
# Trac #955 -- check that the cdf computed by special functions
# matches the integrated pdf
a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
b = stats.ncx2._cdfvec(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
assert_allclose(a, b, rtol=1e-3, atol=0)
def test_ncx2_tails_pdf():
    # ncx2.pdf does not return nans in extreme tails (example from gh-1577)
# NB: this is to check that nan_to_num is not needed in ncx2.pdf
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered in log")
assert_equal(stats.ncx2.pdf(1, np.arange(340, 350), 2), 0)
logval = stats.ncx2.logpdf(1, np.arange(340, 350), 2)
assert_(np.isneginf(logval).all())
@pytest.mark.parametrize('method, expected', [
('cdf', np.array([2.497951336e-09, 3.437288941e-10])),
('pdf', np.array([1.238579980e-07, 1.710041145e-08])),
('logpdf', np.array([-15.90413011, -17.88416331])),
('ppf', np.array([4.865182052, 7.017182271]))
])
def test_ncx2_zero_nc(method, expected):
# gh-5441
# ncx2 with nc=0 is identical to chi2
# Comparison to R (v3.5.1)
# > options(digits=10)
# > pchisq(0.1, df=10, ncp=c(0,4))
# > dchisq(0.1, df=10, ncp=c(0,4))
# > dchisq(0.1, df=10, ncp=c(0,4), log=TRUE)
# > qchisq(0.1, df=10, ncp=c(0,4))
result = getattr(stats.ncx2, method)(0.1, nc=[0, 4], df=10)
assert_allclose(result, expected, atol=1e-15)
def test_ncx2_zero_nc_rvs():
# gh-5441
# ncx2 with nc=0 is identical to chi2
result = stats.ncx2.rvs(df=10, nc=0, random_state=1)
expected = stats.chi2.rvs(df=10, random_state=1)
assert_allclose(result, expected, atol=1e-15)
def test_foldnorm_zero():
# Parameter value c=0 was not enabled, see gh-2399.
rv = stats.foldnorm(0, scale=1)
assert_equal(rv.cdf(0), 0) # rv.cdf(0) previously resulted in: nan
def test_stats_shapes_argcheck():
# stats method was failing for vector shapes if some of the values
# were outside of the allowed range, see gh-2678
mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5) # 0 is not a legal `a`
mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)
mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2)
assert_equal(mv2_augmented, mv3)
# -1 is not a legal shape parameter
mv3 = stats.lognorm.stats([2, 2.4, -1])
mv2 = stats.lognorm.stats([2, 2.4])
mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2)
assert_equal(mv2_augmented, mv3)
# FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.
# stats method with multiple shape parameters is not properly vectorized
# anyway, so some distributions may or may not fail.
# Test subclassing distributions w/ explicit shapes
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, a):
return 42
class _distr2_gen(stats.rv_continuous):
def _cdf(self, x, a):
return 42 * a + x
class _distr3_gen(stats.rv_continuous):
def _pdf(self, x, a, b):
return a + b
def _cdf(self, x, a):
# Different # of shape params from _pdf, to be able to check that
            # inspection catches the inconsistency.
return 42 * a + x
class _distr6_gen(stats.rv_continuous):
# Two shape parameters (both _pdf and _cdf defined, consistent shapes.)
def _pdf(self, x, a, b):
return a*x + b
def _cdf(self, x, a, b):
return 42 * a + x
class TestSubclassingExplicitShapes(object):
# Construct a distribution w/ explicit shapes parameter and test it.
def test_correct_shapes(self):
dummy_distr = _distr_gen(name='dummy', shapes='a')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_wrong_shapes_1(self):
dummy_distr = _distr_gen(name='dummy', shapes='A')
assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))
def test_wrong_shapes_2(self):
dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')
dct = dict(a=1, b=2, c=3)
assert_raises(TypeError, dummy_distr.pdf, 1, **dct)
def test_shapes_string(self):
# shapes must be a string
dct = dict(name='dummy', shapes=42)
assert_raises(TypeError, _distr_gen, **dct)
def test_shapes_identifiers_1(self):
# shapes must be a comma-separated list of valid python identifiers
dct = dict(name='dummy', shapes='(!)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_2(self):
dct = dict(name='dummy', shapes='4chan')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_3(self):
dct = dict(name='dummy', shapes='m(fti)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_nodefaults(self):
dct = dict(name='dummy', shapes='a=2')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_args(self):
dct = dict(name='dummy', shapes='*args')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_kwargs(self):
dct = dict(name='dummy', shapes='**kwargs')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_keywords(self):
# python keywords cannot be used for shape parameters
dct = dict(name='dummy', shapes='a, b, c, lambda')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_signature(self):
# test explicit shapes which agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a')
assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)
def test_shapes_signature_inconsistent(self):
# test explicit shapes which do not agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a, b')
assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))
def test_star_args(self):
# test _pdf with only starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg
dist = _dist_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)
assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)
assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))
def test_star_args_2(self):
# test _pdf with named & starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, offset, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg + offset
dist = _dist_gen(shapes='offset, extra_kwarg')
assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),
stats.norm.pdf(0.5)*33 + 111)
assert_equal(dist.pdf(0.5, 111, 33),
stats.norm.pdf(0.5)*33 + 111)
def test_extra_kwarg(self):
# **kwargs to _pdf are ignored.
# this is a limitation of the framework (_pdf(x, *goodargs))
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, *args, **kwargs):
# _pdf should handle *args, **kwargs itself. Here "handling"
# is ignoring *args and looking for ``extra_kwarg`` and using
# that.
extra_kwarg = kwargs.pop('extra_kwarg', 1)
return stats.norm._pdf(x) * extra_kwarg
dist = _distr_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))
    def test_shapes_empty_string(self):
# shapes='' is equivalent to shapes=None
class _dist_gen(stats.rv_continuous):
def _pdf(self, x):
return stats.norm.pdf(x)
dist = _dist_gen(shapes='')
assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))
class TestSubclassingNoShapes(object):
# Construct a distribution w/o explicit shapes parameter and test it.
def test_only__pdf(self):
dummy_distr = _distr_gen(name='dummy')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_only__cdf(self):
# _pdf is determined from _cdf by taking numerical derivative
dummy_distr = _distr2_gen(name='dummy')
assert_almost_equal(dummy_distr.pdf(1, a=1), 1)
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_signature_inspection(self):
# check that _pdf signature inspection works correctly, and is used in
# the class docstring
dummy_distr = _distr_gen(name='dummy')
assert_equal(dummy_distr.numargs, 1)
assert_equal(dummy_distr.shapes, 'a')
res = re.findall(r'logpdf\(x, a, loc=0, scale=1\)',
dummy_distr.__doc__)
assert_(len(res) == 1)
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_signature_inspection_2args(self):
# same for 2 shape params and both _pdf and _cdf defined
dummy_distr = _distr6_gen(name='dummy')
assert_equal(dummy_distr.numargs, 2)
assert_equal(dummy_distr.shapes, 'a, b')
res = re.findall(r'logpdf\(x, a, b, loc=0, scale=1\)',
dummy_distr.__doc__)
assert_(len(res) == 1)
def test_signature_inspection_2args_incorrect_shapes(self):
# both _pdf and _cdf defined, but shapes are inconsistent: raises
assert_raises(TypeError, _distr3_gen, name='dummy')
def test_defaults_raise(self):
# default arguments should raise
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a=42):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
def test_starargs_raise(self):
# without explicit shapes, *args are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, *args):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
def test_kwargs_raise(self):
# without explicit shapes, **kwargs are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, **kwargs):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_docstrings():
badones = [r',\s*,', r'\(\s*,', r'^\s*:']
for distname in stats.__all__:
dist = getattr(stats, distname)
if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):
for regex in badones:
assert_(re.search(regex, dist.__doc__) is None)
def test_infinite_input():
assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0)
assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1)
def test_lomax_accuracy():
# regression test for gh-4033
p = stats.lomax.ppf(stats.lomax.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_gompertz_accuracy():
# Regression test for gh-4031
p = stats.gompertz.ppf(stats.gompertz.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_truncexpon_accuracy():
# regression test for gh-4035
p = stats.truncexpon.ppf(stats.truncexpon.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_rayleigh_accuracy():
# regression test for gh-4034
p = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1)
assert_almost_equal(p, 9.0, decimal=15)
def test_genextreme_give_no_warnings():
"""regression test for gh-6219"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
stats.genextreme.cdf(.5, 0)
stats.genextreme.pdf(.5, 0)
stats.genextreme.ppf(.5, 0)
stats.genextreme.logpdf(-np.inf, 0.0)
number_of_warnings_thrown = len(w)
assert_equal(number_of_warnings_thrown, 0)
def test_genextreme_entropy():
# regression test for gh-5181
euler_gamma = 0.5772156649015329
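    # In scipy's shape convention the entropy of genextreme(c) is
    # log(scale) + euler_gamma * (1 - c) + 1; the cases below instantiate it.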
h = stats.genextreme.entropy(-1.0)
assert_allclose(h, 2*euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(0)
assert_allclose(h, euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(1.0)
assert_equal(h, 1)
h = stats.genextreme.entropy(-2.0, scale=10)
assert_allclose(h, euler_gamma*3 + np.log(10) + 1, rtol=1e-14)
h = stats.genextreme.entropy(10)
assert_allclose(h, -9*euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(-10)
assert_allclose(h, 11*euler_gamma + 1, rtol=1e-14)
def test_genextreme_sf_isf():
# Expected values were computed using mpmath:
#
# import mpmath
#
# def mp_genextreme_sf(x, xi, mu=0, sigma=1):
# # Formula from wikipedia, which has a sign convention for xi that
# # is the opposite of scipy's shape parameter.
# if xi != 0:
# t = mpmath.power(1 + ((x - mu)/sigma)*xi, -1/xi)
# else:
# t = mpmath.exp(-(x - mu)/sigma)
# return 1 - mpmath.exp(-t)
#
# >>> mpmath.mp.dps = 1000
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("1e8"), mpmath.mp.mpf("0.125"))
# >>> float(s)
# 1.6777205262585625e-57
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("-0.125"))
# >>> float(s)
# 1.52587890625e-21
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("0"))
# >>> float(s)
# 0.00034218086528426593
x = 1e8
s = stats.genextreme.sf(x, -0.125)
assert_allclose(s, 1.6777205262585625e-57)
x2 = stats.genextreme.isf(s, -0.125)
assert_allclose(x2, x)
x = 7.98
s = stats.genextreme.sf(x, 0.125)
assert_allclose(s, 1.52587890625e-21)
x2 = stats.genextreme.isf(s, 0.125)
assert_allclose(x2, x)
x = 7.98
s = stats.genextreme.sf(x, 0)
assert_allclose(s, 0.00034218086528426593)
x2 = stats.genextreme.isf(s, 0)
assert_allclose(x2, x)
def test_burr12_ppf_small_arg():
prob = 1e-16
quantile = stats.burr12.ppf(prob, 2, 3)
# The expected quantile was computed using mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 100
# >>> prob = mpmath.mpf('1e-16')
# >>> c = mpmath.mpf(2)
# >>> d = mpmath.mpf(3)
# >>> float(((1-prob)**(-1/d) - 1)**(1/c))
# 5.7735026918962575e-09
assert_allclose(quantile, 5.7735026918962575e-09)
def test_crystalball_function():
"""
All values are calculated using the independent implementation of the
ROOT framework (see https://root.cern.ch/).
Corresponding ROOT code is given in the comments.
"""
X = np.linspace(-5.0, 5.0, 21)[:-1]
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_pdf(x, 1.0, 2.0, 1.0) << ", ";
calculated = stats.crystalball.pdf(X, beta=1.0, m=2.0)
expected = np.array([0.0202867, 0.0241428, 0.0292128, 0.0360652, 0.045645,
0.059618, 0.0811467, 0.116851, 0.18258, 0.265652,
0.301023, 0.265652, 0.18258, 0.097728, 0.0407391,
0.013226, 0.00334407, 0.000658486, 0.000100982,
1.20606e-05])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 1.0) << ", ";
calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0)
expected = np.array([0.0019648, 0.00279754, 0.00417592, 0.00663121,
0.0114587, 0.0223803, 0.0530497, 0.12726, 0.237752,
0.345928, 0.391987, 0.345928, 0.237752, 0.12726,
0.0530497, 0.0172227, 0.00435458, 0.000857469,
0.000131497, 1.57051e-05])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5) {
# std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 2.0, 0.5);
# std::cout << ", ";
# }
calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
expected = np.array([0.00785921, 0.0111902, 0.0167037, 0.0265249,
0.0423866, 0.0636298, 0.0897324, 0.118876, 0.147944,
0.172964, 0.189964, 0.195994, 0.189964, 0.172964,
0.147944, 0.118876, 0.0897324, 0.0636298, 0.0423866,
0.0265249])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_cdf(x, 1.0, 2.0, 1.0) << ", ";
calculated = stats.crystalball.cdf(X, beta=1.0, m=2.0)
expected = np.array([0.12172, 0.132785, 0.146064, 0.162293, 0.18258,
0.208663, 0.24344, 0.292128, 0.36516, 0.478254,
0.622723, 0.767192, 0.880286, 0.94959, 0.982834,
0.995314, 0.998981, 0.999824, 0.999976, 0.999997])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 1.0) << ", ";
calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0)
expected = np.array([0.00442081, 0.00559509, 0.00730787, 0.00994682,
0.0143234, 0.0223803, 0.0397873, 0.0830763, 0.173323,
0.320592, 0.508717, 0.696841, 0.844111, 0.934357,
0.977646, 0.993899, 0.998674, 0.999771, 0.999969,
0.999997])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5) {
# std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 2.0, 0.5);
# std::cout << ", ";
# }
calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
expected = np.array([0.0176832, 0.0223803, 0.0292315, 0.0397873, 0.0567945,
0.0830763, 0.121242, 0.173323, 0.24011, 0.320592,
0.411731, 0.508717, 0.605702, 0.696841, 0.777324,
0.844111, 0.896192, 0.934357, 0.960639, 0.977646])
assert_allclose(expected, calculated, rtol=0.001)
def test_crystalball_function_moments():
"""
All values are calculated using the pdf formula and the integrate function
of Mathematica
"""
    # The last two (beta, m) pairs test the special case m == beta**2
beta = np.array([2.0, 1.0, 3.0, 2.0, 3.0])
m = np.array([3.0, 3.0, 2.0, 4.0, 9.0])
# The distribution should be correctly normalised
expected_0th_moment = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
calculated_0th_moment = stats.crystalball._munp(0, beta, m)
assert_allclose(expected_0th_moment, calculated_0th_moment, rtol=0.001)
# calculated using wolframalpha.com
# e.g. for beta = 2 and m = 3 we calculate the norm like this:
# integrate exp(-x^2/2) from -2 to infinity +
# integrate (3/2)^3*exp(-2^2/2)*(3/2-2-x)^(-3) from -infinity to -2
norm = np.array([2.5511, 3.01873, 2.51065, 2.53983, 2.507410455])
a = np.array([-0.21992, -3.03265, np.inf, -0.135335, -0.003174])
expected_1th_moment = a / norm
calculated_1th_moment = stats.crystalball._munp(1, beta, m)
assert_allclose(expected_1th_moment, calculated_1th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, 3.2616, 2.519908])
expected_2th_moment = a / norm
calculated_2th_moment = stats.crystalball._munp(2, beta, m)
assert_allclose(expected_2th_moment, calculated_2th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, np.inf, -0.0577668])
expected_3th_moment = a / norm
calculated_3th_moment = stats.crystalball._munp(3, beta, m)
assert_allclose(expected_3th_moment, calculated_3th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, np.inf, 7.78468])
expected_4th_moment = a / norm
calculated_4th_moment = stats.crystalball._munp(4, beta, m)
assert_allclose(expected_4th_moment, calculated_4th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, np.inf, -1.31086])
expected_5th_moment = a / norm
calculated_5th_moment = stats.crystalball._munp(5, beta, m)
assert_allclose(expected_5th_moment, calculated_5th_moment, rtol=0.001)
def test_ncf_variance():
# Regression test for gh-10658 (incorrect variance formula for ncf).
# The correct value of ncf.var(2, 6, 4), 42.75, can be verified with, for
# example, Wolfram Alpha with the expression
# Variance[NoncentralFRatioDistribution[2, 6, 4]]
# or with the implementation of the noncentral F distribution in the C++
# library Boost.
v = stats.ncf.var(2, 6, 4)
assert_allclose(v, 42.75, rtol=1e-14)
class TestHistogram(object):
def setup_method(self):
np.random.seed(1234)
# We have 8 bins
# [1,2), [2,3), [3,4), [4,5), [5,6), [6,7), [7,8), [8,9)
# But actually np.histogram will put the last 9 also in the [8,9) bin!
# Therefore there is a slight difference below for the last bin, from
# what you might have expected.
histogram = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5,
6, 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8)
self.template = stats.rv_histogram(histogram)
data = stats.norm.rvs(loc=1.0, scale=2.5, size=10000, random_state=123)
norm_histogram = np.histogram(data, bins=50)
self.norm_template = stats.rv_histogram(norm_histogram)
def test_pdf(self):
values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
pdf_values = np.asarray([0.0/25.0, 0.0/25.0, 1.0/25.0, 1.0/25.0,
2.0/25.0, 2.0/25.0, 3.0/25.0, 3.0/25.0,
4.0/25.0, 4.0/25.0, 5.0/25.0, 5.0/25.0,
4.0/25.0, 4.0/25.0, 3.0/25.0, 3.0/25.0,
3.0/25.0, 3.0/25.0, 0.0/25.0, 0.0/25.0])
assert_allclose(self.template.pdf(values), pdf_values)
# Test explicitly the corner cases:
# As stated above the pdf in the bin [8,9) is greater than
        # one would naively expect because np.histogram put the 9
# into the [8,9) bin.
assert_almost_equal(self.template.pdf(8.0), 3.0/25.0)
assert_almost_equal(self.template.pdf(8.5), 3.0/25.0)
# 9 is outside our defined bins [8,9) hence the pdf is already 0
# for a continuous distribution this is fine, because a single value
        # has zero probability!
assert_almost_equal(self.template.pdf(9.0), 0.0/25.0)
assert_almost_equal(self.template.pdf(10.0), 0.0/25.0)
x = np.linspace(-2, 2, 10)
assert_allclose(self.norm_template.pdf(x),
stats.norm.pdf(x, loc=1.0, scale=2.5), rtol=0.1)
def test_cdf_ppf(self):
values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
cdf_values = np.asarray([0.0/25.0, 0.0/25.0, 0.0/25.0, 0.5/25.0,
1.0/25.0, 2.0/25.0, 3.0/25.0, 4.5/25.0,
6.0/25.0, 8.0/25.0, 10.0/25.0, 12.5/25.0,
15.0/25.0, 17.0/25.0, 19.0/25.0, 20.5/25.0,
22.0/25.0, 23.5/25.0, 25.0/25.0, 25.0/25.0])
assert_allclose(self.template.cdf(values), cdf_values)
# First three and last two values in cdf_value are not unique
assert_allclose(self.template.ppf(cdf_values[2:-1]), values[2:-1])
# Test of cdf and ppf are inverse functions
x = np.linspace(1.0, 9.0, 100)
assert_allclose(self.template.ppf(self.template.cdf(x)), x)
x = np.linspace(0.0, 1.0, 100)
assert_allclose(self.template.cdf(self.template.ppf(x)), x)
x = np.linspace(-2, 2, 10)
assert_allclose(self.norm_template.cdf(x),
stats.norm.cdf(x, loc=1.0, scale=2.5), rtol=0.1)
def test_rvs(self):
N = 10000
sample = self.template.rvs(size=N, random_state=123)
assert_equal(np.sum(sample < 1.0), 0.0)
assert_allclose(np.sum(sample <= 2.0), 1.0/25.0 * N, rtol=0.2)
assert_allclose(np.sum(sample <= 2.5), 2.0/25.0 * N, rtol=0.2)
assert_allclose(np.sum(sample <= 3.0), 3.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 3.5), 4.5/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 4.0), 6.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 4.5), 8.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 5.0), 10.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 5.5), 12.5/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 6.0), 15.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 6.5), 17.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 7.0), 19.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 7.5), 20.5/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 8.0), 22.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 8.5), 23.5/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)
assert_equal(np.sum(sample > 9.0), 0.0)
def test_munp(self):
for n in range(4):
assert_allclose(self.norm_template._munp(n),
stats.norm._munp(n, 1.0, 2.5), rtol=0.05)
def test_entropy(self):
assert_allclose(self.norm_template.entropy(),
stats.norm.entropy(loc=1.0, scale=2.5), rtol=0.05)
def test_loguniform():
    # This test makes sure that the "loguniform" alias really is log-uniformly distributed
rv = stats.loguniform(10 ** -3, 10 ** 0)
rvs = rv.rvs(size=10000, random_state=42)
vals, _ = np.histogram(np.log10(rvs), bins=10)
assert 900 <= vals.min() <= vals.max() <= 1100
assert np.abs(np.median(vals) - 1000) <= 10
class TestArgus(object):
def test_argus_rvs_large_chi(self):
# test that the algorithm can handle large values of chi
x = stats.argus.rvs(50, size=500, random_state=325)
assert_almost_equal(stats.argus(50).mean(), x.mean(), decimal=4)
def test_argus_rvs_ratio_uniforms(self):
# test that the ratio of uniforms algorithms works for chi > 2.611
x = stats.argus.rvs(3.5, size=1500, random_state=1535)
assert_almost_equal(stats.argus(3.5).mean(), x.mean(), decimal=3)
assert_almost_equal(stats.argus(3.5).std(), x.std(), decimal=3)
| aeklant/scipy | scipy/stats/tests/test_distributions.py | Python | bsd-3-clause | 165,416 |
from __future__ import absolute_import, unicode_literals, division, print_function
from . import model_base
__all__ = ['PhotomModelB4']
class PhotomModelB4(model_base.DataModel):
"""
A data model for photom reference files.
"""
schema_url = "photomb4.schema.yaml"
def __init__(self, init=None, phot_table=None, **kwargs):
super(PhotomModelB4, self).__init__(init=init, **kwargs)
if phot_table is not None:
self.phot_table = phot_table
class NircamPhotomModelB4(PhotomModelB4):
"""
A data model for NIRCam photom reference files.
"""
schema_url = "nircam_photomb4.schema.yaml"
def __init__(self, init=None, phot_table=None, **kwargs):
super(NircamPhotomModelB4, self).__init__(init=init, **kwargs)
if phot_table is not None:
self.phot_table = phot_table
class NirissPhotomModelB4(PhotomModelB4):
"""
A data model for NIRISS photom reference files.
"""
schema_url = "niriss_photomb4.schema.yaml"
def __init__(self, init=None, phot_table=None, **kwargs):
super(NirissPhotomModelB4, self).__init__(init=init, **kwargs)
if phot_table is not None:
self.phot_table = phot_table
class NirspecPhotomModelB4(PhotomModelB4):
"""
A data model for NIRSpec photom reference files.
"""
schema_url = "nirspec_photomb4.schema.yaml"
def __init__(self, init=None, phot_table=None, **kwargs):
super(NirspecPhotomModelB4, self).__init__(init=init, **kwargs)
if phot_table is not None:
self.phot_table = phot_table
class MiriImgPhotomModelB4(PhotomModelB4):
"""
A data model for MIRI imaging photom reference files.
"""
schema_url = "mirimg_photomb4.schema.yaml"
def __init__(self, init=None, phot_table=None, **kwargs):
super(MiriImgPhotomModelB4, self).__init__(init=init, **kwargs)
if phot_table is not None:
self.phot_table = phot_table
class MiriMrsPhotomModelB4(PhotomModelB4):
"""
A data model for MIRI MRS photom reference files.
"""
schema_url = "mirmrs_photomb4.schema.yaml"
def __init__(self, init=None, phot_table=None, **kwargs):
super(MiriMrsPhotomModelB4, self).__init__(init=init, **kwargs)
if phot_table is not None:
self.phot_table = phot_table
| mdboom/jwst_lib.models | jwst_lib/models/photom_b4.py | Python | bsd-3-clause | 2,355 |
"""
* Copyright (c) 2012-2017, Nic McDonald and Adriana Flores
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the name of prim nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
"""
import codecs
import re
import os
import sys
try:
from setuptools import setup
except:
print('please install setuptools via pip:')
print(' pip3 install setuptools')
sys.exit(-1)
def find_version(*file_paths):
version_file = codecs.open(os.path.join(os.path.abspath(
os.path.dirname(__file__)), *file_paths), 'r').read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
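# Illustrative call (assuming sssweep/__init__.py contains a line such as
# __version__ = '1.2.3'): find_version('sssweep', '__init__.py') returns '1.2.3'.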
setup(
name='sssweep',
version=find_version('sssweep', '__init__.py'),
description='Automatic task generation for SuperSim sweeps and plot web viewer',
author='Nic McDonald and Adriana Flores',
author_email='[email protected] and [email protected]',
license='BSD',
url='http://github.com/nicmcd/sssweep',
packages=['sssweep'],
install_requires=['taskrun >= 3.0.0',
'ssplot >= 0.1.0'],
)
| adrifloresm/sssweep | setup.py | Python | bsd-3-clause | 2,611 |
import pytest
from collections import namedtuple
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from skopt import dummy_minimize
from skopt.benchmarks import bench1
from skopt.callbacks import TimerCallback
from skopt.callbacks import DeltaYStopper
@pytest.mark.fast_test
def test_timer_callback():
callback = TimerCallback()
dummy_minimize(bench1, [(-1.0, 1.0)], callback=callback, n_calls=10)
assert_equal(len(callback.iter_time), 10)
assert_less(0.0, sum(callback.iter_time))
@pytest.mark.fast_test
def test_deltay_stopper():
deltay = DeltaYStopper(0.2, 3)
Result = namedtuple('Result', ['func_vals'])
assert deltay(Result([0, 1, 2, 3, 4, 0.1, 0.19]))
assert not deltay(Result([0, 1, 2, 3, 4, 0.1]))
assert deltay(Result([0, 1])) is None
| ccauet/scikit-optimize | skopt/tests/test_callbacks.py | Python | bsd-3-clause | 832 |
import os
import sys
import re
from bento.compat \
import \
inspect as compat_inspect
from bento.commands.core \
import \
command
SAFE_MODULE_NAME = re.compile("[^a-zA-Z_]")
__HOOK_REGISTRY = {}
__PRE_HOOK_REGISTRY = {}
__POST_HOOK_REGISTRY = {}
__COMMANDS_OVERRIDE = {}
__INIT_FUNCS = {}
def add_to_registry(func, category):
global __HOOK_REGISTRY
if not category in __HOOK_REGISTRY:
__HOOK_REGISTRY[category] = [func]
else:
__HOOK_REGISTRY[category].append(func)
def override_command(command, func):
global __COMMANDS_OVERRIDE
local_dir = os.path.dirname(compat_inspect.stack()[2][1])
if __COMMANDS_OVERRIDE.has_key(command):
__COMMANDS_OVERRIDE[command].append((func, local_dir))
else:
__COMMANDS_OVERRIDE[command] = [(func, local_dir)]
def add_to_pre_registry(func, cmd_name):
global __PRE_HOOK_REGISTRY
if not cmd_name in __PRE_HOOK_REGISTRY:
__PRE_HOOK_REGISTRY[cmd_name] = [func]
else:
__PRE_HOOK_REGISTRY[cmd_name].append(func)
def add_to_post_registry(func, cmd_name):
global __POST_HOOK_REGISTRY
if not cmd_name in __POST_HOOK_REGISTRY:
__POST_HOOK_REGISTRY[cmd_name] = [func]
else:
__POST_HOOK_REGISTRY[cmd_name].append(func)
def get_registry_categories():
global __HOOK_REGISTRY
return __HOOK_REGISTRY.keys()
def get_registry_category(categorie):
global __HOOK_REGISTRY
return __HOOK_REGISTRY[categorie]
def get_pre_hooks(cmd_name):
global __PRE_HOOK_REGISTRY
return __PRE_HOOK_REGISTRY.get(cmd_name, [])
def get_post_hooks(cmd_name):
global __POST_HOOK_REGISTRY
return __POST_HOOK_REGISTRY.get(cmd_name, [])
def get_command_override(cmd_name):
global __COMMANDS_OVERRIDE
return __COMMANDS_OVERRIDE.get(cmd_name, [])
def _make_hook_decorator(command_name, kind):
name = "%s_%s" % (kind, command_name)
help_bypass = False
def decorator(f):
local_dir = os.path.dirname(compat_inspect.stack()[1][1])
add_to_registry((f, local_dir, help_bypass), name)
if kind == "post":
add_to_post_registry((f, local_dir, help_bypass), command_name)
elif kind == "pre":
add_to_pre_registry((f, local_dir, help_bypass), command_name)
else:
raise ValueError("invalid hook kind %s" % kind)
return f
return decorator
post_configure = _make_hook_decorator("configure", "post")
pre_configure = _make_hook_decorator("configure", "pre")
post_build = _make_hook_decorator("build", "post")
pre_build = _make_hook_decorator("build", "pre")
post_sdist = _make_hook_decorator("sdist", "post")
pre_sdist = _make_hook_decorator("sdist", "pre")
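# Illustrative sketch of how a project hook file might use the decorators above
# (function names and signatures are assumptions, not part of bento itself):
#
#     @pre_build
#     def check_environment(context):
#         pass
#
#     @post_build
#     def report(context):
#         pass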
def override(f):
override_command(f.__name__, f)
def options(f):
__INIT_FUNCS["options"] = f
return lambda context: f(context)
def startup(f):
__INIT_FUNCS["startup"] = f
return lambda context: f(context)
def shutdown(f):
__INIT_FUNCS["shutdown"] = f
return lambda context: f(context)
def dummy_startup(ctx):
pass
def dummy_options(ctx):
pass
def dummy_shutdown():
pass
def create_hook_module(target):
import imp
safe_name = SAFE_MODULE_NAME.sub("_", target, len(target))
module_name = "bento_hook_%s" % safe_name
main_file = os.path.abspath(target)
module = imp.new_module(module_name)
module.__file__ = main_file
code = open(main_file).read()
sys.path.insert(0, os.path.dirname(main_file))
try:
exec(compile(code, main_file, 'exec'), module.__dict__)
sys.modules[module_name] = module
finally:
sys.path.pop(0)
module.root_path = main_file
if not "startup" in __INIT_FUNCS:
module.startup = dummy_startup
if not "options" in __INIT_FUNCS:
module.options = dummy_options
if not "shutdown" in __INIT_FUNCS:
module.shutdown = dummy_shutdown
return module
| abadger/Bento | bento/commands/hooks.py | Python | bsd-3-clause | 3,935 |
import os
import nose
import django
NAME = os.path.basename(os.path.dirname(__file__))
ROOT = os.path.abspath(os.path.dirname(__file__))
os.environ['DJANGO_SETTINGS_MODULE'] = 'fake_settings'
os.environ['PYTHONPATH'] = os.pathsep.join([ROOT,
os.path.join(ROOT, 'examples')])
if __name__ == '__main__':
if hasattr(django, 'setup'):
# Django's app registry was added in 1.7. We need to call `setup` to
# initiate it.
django.setup()
nose.main()
| jbalogh/jingo | run_tests.py | Python | bsd-3-clause | 522 |
# -*-coding:Utf-8 -*
# Copyright (c) 2012 NOEL-BARON Léo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'chercherbois'."""
from random import random, randint, choice
from math import sqrt
from primaires.interpreteur.commande.commande import Commande
from primaires.perso.exceptions.stat import DepassementStat
class CmdChercherBois(Commande):
"""Commande 'chercherbois'"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "chercherbois", "gatherwood")
self.nom_categorie = "objets"
self.aide_courte = "permet de chercher du bois"
self.aide_longue = \
"Cette commande permet de chercher du combustible dans la salle " \
"où vous vous trouvez."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
salle = personnage.salle
if salle.interieur:
personnage << "|err|Vous ne pouvez chercher du combustible " \
"ici.|ff|"
return
personnage.agir("chercherbois")
prototypes = importeur.objet.prototypes.values()
prototypes = [p for p in prototypes if p.est_de_type("combustible")]
combustibles = []
choix = None
for proto in prototypes:
if personnage.salle.terrain.nom in proto.terrains:
combustibles.append((proto.rarete, proto))
combustibles = sorted(combustibles, key=lambda combu: combu[0])
if not combustibles:
personnage << "|err|Il n'y a rien qui puisse brûler par ici.|ff|"
else:
niveau = sqrt(personnage.get_talent("collecte_bois") / 100)
if not niveau:
niveau = 0.1
proba_trouver = round(random(), 1)
            if proba_trouver <= niveau: # wood is found
possibles = []
for proba, combustible in combustibles:
if 2 * proba_trouver >= (proba - 1) / 10:
for i in range(int(10 / proba)):
possibles.append(combustible)
nb_obj = randint(int(proba_trouver * 10), int(niveau * 10)) + 1
if possibles:
choix = choice(possibles)
somme_qualites = 0
end = int(choix.poids_unitaire * nb_obj / 2)
try:
personnage.stats.endurance -= end
except DepassementStat:
personnage << "|err|Vous êtes trop fatigué pour " \
"cela.|ff|"
return
try:
personnage.stats.endurance -= 3
except DepassementStat:
personnage << "|err|Vous êtes trop fatigué pour cela.|ff|"
return
        # Start searching for wood
personnage.etats.ajouter("collecte_bois")
personnage << "Vous vous penchez et commencez à chercher du bois."
personnage.salle.envoyer(
"{} se met à chercher quelque chose par terre.",
personnage)
yield 5
if "collecte_bois" not in personnage.etats:
return
if choix:
for i in range(nb_obj):
objet = importeur.objet.creer_objet(choix)
personnage.salle.objets_sol.ajouter(objet)
somme_qualites += objet.qualite
personnage << "Vous trouvez {} " \
"et vous relevez.".format(choix.get_nom(nb_obj))
personnage.salle.envoyer("{} se relève, l'air satisfait.",
personnage)
personnage.pratiquer_talent("collecte_bois")
personnage.gagner_xp("survie", somme_qualites * 2)
else:
personnage << "Vous vous redressez sans avoir rien trouvé."
personnage.salle.envoyer("{} se relève, l'air dépité.",
personnage)
personnage.pratiquer_talent("collecte_bois", 4)
personnage.etats.retirer("collecte_bois")
| stormi/tsunami | src/primaires/salle/commandes/chercherbois/__init__.py | Python | bsd-3-clause | 5,680 |
from __future__ import absolute_import, unicode_literals
import pickle
from io import StringIO, BytesIO
from kombu import version_info_t
from kombu import utils
from kombu.five import python_2_unicode_compatible
from kombu.utils.text import version_string_as_tuple
from kombu.tests.case import Case, Mock, patch, mock
@python_2_unicode_compatible
class OldString(object):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def split(self, *args, **kwargs):
return self.value.split(*args, **kwargs)
def rsplit(self, *args, **kwargs):
return self.value.rsplit(*args, **kwargs)
class test_kombu_module(Case):
def test_dir(self):
import kombu
self.assertTrue(dir(kombu))
class test_utils(Case):
def test_maybe_list(self):
self.assertEqual(utils.maybe_list(None), [])
self.assertEqual(utils.maybe_list(1), [1])
self.assertEqual(utils.maybe_list([1, 2, 3]), [1, 2, 3])
def test_fxrange_no_repeatlast(self):
self.assertEqual(list(utils.fxrange(1.0, 3.0, 1.0)),
[1.0, 2.0, 3.0])
def test_fxrangemax(self):
self.assertEqual(list(utils.fxrangemax(1.0, 3.0, 1.0, 30.0)),
[1.0, 2.0, 3.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 3.0])
self.assertEqual(list(utils.fxrangemax(1.0, None, 1.0, 30.0)),
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
def test_reprkwargs(self):
self.assertTrue(utils.reprkwargs({'foo': 'bar', 1: 2, 'k': 'v'}))
def test_reprcall(self):
self.assertTrue(
utils.reprcall('add', (2, 2), {'copy': True}),
)
class test_UUID(Case):
def test_uuid4(self):
self.assertNotEqual(utils.uuid4(),
utils.uuid4())
def test_uuid(self):
i1 = utils.uuid()
i2 = utils.uuid()
self.assertIsInstance(i1, str)
self.assertNotEqual(i1, i2)
class MyStringIO(StringIO):
def close(self):
pass
class MyBytesIO(BytesIO):
def close(self):
pass
class test_emergency_dump_state(Case):
@mock.stdouts
def test_dump(self, stdout, stderr):
fh = MyBytesIO()
utils.emergency_dump_state(
{'foo': 'bar'}, open_file=lambda n, m: fh)
self.assertDictEqual(
pickle.loads(fh.getvalue()), {'foo': 'bar'})
self.assertTrue(stderr.getvalue())
self.assertFalse(stdout.getvalue())
@mock.stdouts
def test_dump_second_strategy(self, stdout, stderr):
fh = MyStringIO()
def raise_something(*args, **kwargs):
raise KeyError('foo')
utils.emergency_dump_state(
{'foo': 'bar'},
open_file=lambda n, m: fh, dump=raise_something
)
self.assertIn('foo', fh.getvalue())
self.assertIn('bar', fh.getvalue())
self.assertTrue(stderr.getvalue())
self.assertFalse(stdout.getvalue())
class test_retry_over_time(Case):
def setup(self):
self.index = 0
class Predicate(Exception):
pass
def myfun(self):
if self.index < 9:
raise self.Predicate()
return 42
def errback(self, exc, intervals, retries):
interval = next(intervals)
sleepvals = (None, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 16.0)
self.index += 1
self.assertEqual(interval, sleepvals[self.index])
return interval
@mock.sleepdeprived(module=utils)
def test_simple(self):
prev_count, utils.count = utils.count, Mock()
try:
utils.count.return_value = list(range(1))
x = utils.retry_over_time(self.myfun, self.Predicate,
errback=None, interval_max=14)
self.assertIsNone(x)
utils.count.return_value = list(range(10))
cb = Mock()
x = utils.retry_over_time(self.myfun, self.Predicate,
errback=self.errback, callback=cb,
interval_max=14)
self.assertEqual(x, 42)
self.assertEqual(self.index, 9)
cb.assert_called_with()
finally:
utils.count = prev_count
@mock.sleepdeprived(module=utils)
def test_retry_once(self):
with self.assertRaises(self.Predicate):
utils.retry_over_time(
self.myfun, self.Predicate,
max_retries=1, errback=self.errback, interval_max=14,
)
self.assertEqual(self.index, 1)
# no errback
with self.assertRaises(self.Predicate):
utils.retry_over_time(
self.myfun, self.Predicate,
max_retries=1, errback=None, interval_max=14,
)
@mock.sleepdeprived(module=utils)
def test_retry_always(self):
Predicate = self.Predicate
class Fun(object):
def __init__(self):
self.calls = 0
def __call__(self, *args, **kwargs):
try:
if self.calls >= 10:
return 42
raise Predicate()
finally:
self.calls += 1
fun = Fun()
self.assertEqual(
utils.retry_over_time(
fun, self.Predicate,
max_retries=0, errback=None, interval_max=14,
),
42,
)
self.assertEqual(fun.calls, 11)
class test_cached_property(Case):
def test_deleting(self):
class X(object):
xx = False
@utils.cached_property
def foo(self):
return 42
@foo.deleter # noqa
def foo(self, value):
self.xx = value
x = X()
del(x.foo)
self.assertFalse(x.xx)
x.__dict__['foo'] = 'here'
del(x.foo)
self.assertEqual(x.xx, 'here')
def test_when_access_from_class(self):
class X(object):
xx = None
@utils.cached_property
def foo(self):
return 42
@foo.setter # noqa
def foo(self, value):
self.xx = 10
desc = X.__dict__['foo']
self.assertIs(X.foo, desc)
self.assertIs(desc.__get__(None), desc)
self.assertIs(desc.__set__(None, 1), desc)
self.assertIs(desc.__delete__(None), desc)
self.assertTrue(desc.setter(1))
x = X()
x.foo = 30
self.assertEqual(x.xx, 10)
del(x.foo)
class test_symbol_by_name(Case):
def test_instance_returns_instance(self):
instance = object()
self.assertIs(utils.symbol_by_name(instance), instance)
def test_returns_default(self):
default = object()
self.assertIs(
utils.symbol_by_name('xyz.ryx.qedoa.weq:foz', default=default),
default,
)
def test_no_default(self):
with self.assertRaises(ImportError):
utils.symbol_by_name('xyz.ryx.qedoa.weq:foz')
def test_imp_reraises_ValueError(self):
imp = Mock()
imp.side_effect = ValueError()
with self.assertRaises(ValueError):
utils.symbol_by_name('kombu.Connection', imp=imp)
def test_package(self):
from kombu.entity import Exchange
self.assertIs(
utils.symbol_by_name('.entity:Exchange', package='kombu'),
Exchange,
)
self.assertTrue(utils.symbol_by_name(':Consumer', package='kombu'))
class test_ChannelPromise(Case):
def test_repr(self):
obj = Mock(name='cb')
self.assertIn(
'promise',
repr(utils.ChannelPromise(obj)),
)
obj.assert_not_called()
class test_entrypoints(Case):
@mock.mask_modules('pkg_resources')
def test_without_pkg_resources(self):
self.assertListEqual(list(utils.entrypoints('kombu.test')), [])
@mock.module_exists('pkg_resources')
def test_with_pkg_resources(self):
with patch('pkg_resources.iter_entry_points', create=True) as iterep:
eps = iterep.return_value = [Mock(), Mock()]
self.assertTrue(list(utils.entrypoints('kombu.test')))
iterep.assert_called_with('kombu.test')
eps[0].load.assert_called_with()
eps[1].load.assert_called_with()
class test_shufflecycle(Case):
def test_shuffles(self):
prev_repeat, utils.repeat = utils.repeat, Mock()
try:
utils.repeat.return_value = list(range(10))
values = {'A', 'B', 'C'}
cycle = utils.shufflecycle(values)
seen = set()
for i in range(10):
next(cycle)
utils.repeat.assert_called_with(None)
self.assertTrue(seen.issubset(values))
with self.assertRaises(StopIteration):
next(cycle)
next(cycle)
finally:
utils.repeat = prev_repeat
class test_version_string_as_tuple(Case):
def test_versions(self):
self.assertTupleEqual(
version_string_as_tuple('3'),
version_info_t(3, 0, 0, '', ''),
)
self.assertTupleEqual(
version_string_as_tuple('3.3'),
version_info_t(3, 3, 0, '', ''),
)
self.assertTupleEqual(
version_string_as_tuple('3.3.1'),
version_info_t(3, 3, 1, '', ''),
)
self.assertTupleEqual(
version_string_as_tuple('3.3.1a3'),
version_info_t(3, 3, 1, 'a3', ''),
)
self.assertTupleEqual(
version_string_as_tuple('3.3.1a3-40c32'),
version_info_t(3, 3, 1, 'a3', '40c32'),
)
self.assertEqual(
version_string_as_tuple('3.3.1.a3.40c32'),
version_info_t(3, 3, 1, 'a3', '40c32'),
)
class test_maybe_fileno(Case):
def test_maybe_fileno(self):
self.assertEqual(utils.maybe_fileno(3), 3)
f = Mock(name='file')
self.assertIs(utils.maybe_fileno(f), f.fileno())
f.fileno.side_effect = ValueError()
self.assertIsNone(utils.maybe_fileno(f))
| Elastica/kombu | kombu/tests/utils/test_utils.py | Python | bsd-3-clause | 10,301 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for the admin template gatherer.'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
import StringIO
import tempfile
import unittest
from grit.gather import admin_template
from grit import util
from grit import grd_reader
from grit import grit_runner
from grit.tool import build
class AdmGathererUnittest(unittest.TestCase):
def testParsingAndTranslating(self):
pseudofile = StringIO.StringIO(
'bingo bongo\n'
'ding dong\n'
'[strings] \n'
'whatcha="bingo bongo"\n'
'gotcha = "bingolabongola "the wise" fingulafongula" \n')
gatherer = admin_template.AdmGatherer.FromFile(pseudofile)
gatherer.Parse()
self.failUnless(len(gatherer.GetCliques()) == 2)
self.failUnless(gatherer.GetCliques()[1].GetMessage().GetRealContent() ==
'bingolabongola "the wise" fingulafongula')
translation = gatherer.Translate('en')
self.failUnless(translation == gatherer.GetText().strip())
def testErrorHandling(self):
pseudofile = StringIO.StringIO(
'bingo bongo\n'
'ding dong\n'
'whatcha="bingo bongo"\n'
'gotcha = "bingolabongola "the wise" fingulafongula" \n')
gatherer = admin_template.AdmGatherer.FromFile(pseudofile)
self.assertRaises(admin_template.MalformedAdminTemplateException,
gatherer.Parse)
_TRANSLATABLES_FROM_FILE = (
'Google', 'Google Desktop', 'Preferences',
'Controls Google Desktop preferences',
'Indexing and Capture Control',
'Controls what files, web pages, and other content will be indexed by Google Desktop.',
'Prevent indexing of email',
# there are lots more but we don't check any further
)
def VerifyCliquesFromAdmFile(self, cliques):
self.failUnless(len(cliques) > 20)
for ix in range(len(self._TRANSLATABLES_FROM_FILE)):
text = cliques[ix].GetMessage().GetRealContent()
self.failUnless(text == self._TRANSLATABLES_FROM_FILE[ix])
def testFromFile(self):
fname = util.PathFromRoot('grit/testdata/GoogleDesktop.adm')
gatherer = admin_template.AdmGatherer.FromFile(fname)
gatherer.Parse()
cliques = gatherer.GetCliques()
self.VerifyCliquesFromAdmFile(cliques)
def MakeGrd(self):
grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3">
<release seq="3">
<structures>
<structure type="admin_template" name="IDAT_GOOGLE_DESKTOP_SEARCH"
file="GoogleDesktop.adm" exclude_from_rc="true" />
<structure type="txt" name="BINGOBONGO"
file="README.txt" exclude_from_rc="true" />
</structures>
</release>
<outputs>
<output filename="de_res.rc" type="rc_all" lang="de" />
</outputs>
</grit>'''), util.PathFromRoot('grit/testdata'))
grd.SetOutputContext('en', {})
grd.RunGatherers(recursive=True)
return grd
def testInGrd(self):
grd = self.MakeGrd()
cliques = grd.children[0].children[0].children[0].GetCliques()
self.VerifyCliquesFromAdmFile(cliques)
def testFileIsOutput(self):
grd = self.MakeGrd()
dirname = tempfile.mkdtemp()
try:
tool = build.RcBuilder()
tool.o = grit_runner.Options()
tool.output_directory = dirname
tool.res = grd
tool.Process()
self.failUnless(os.path.isfile(
os.path.join(dirname, 'de_GoogleDesktop.adm')))
self.failUnless(os.path.isfile(
os.path.join(dirname, 'de_README.txt')))
finally:
for f in os.listdir(dirname):
os.unlink(os.path.join(dirname, f))
os.rmdir(dirname)
if __name__ == '__main__':
unittest.main()
| JoKaWare/WTL-DUI | tools/grit/grit/gather/admin_template_unittest.py | Python | bsd-3-clause | 3,990 |
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, t 3, s, t 6.1, s, q"
tags = "MoveBy"
import cocos
from cocos.director import director
from cocos.actions import MoveBy
from cocos.sprite import Sprite
import pyglet
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
x,y = director.get_window_size()
self.sprite = Sprite( 'grossini.png', (x/2, y/2) )
self.add( self.sprite, name='sprite' )
self.sprite.do( MoveBy( (x/2,y/2), 6 ) )
def main():
director.init()
test_layer = TestLayer ()
main_scene = cocos.scene.Scene()
main_scene.add(test_layer, name='test_layer')
director.run (main_scene)
if __name__ == '__main__':
main()
| eevee/cocos2d-mirror | test/test_moveby.py | Python | bsd-3-clause | 867 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import subprocess
from devtoolslib.shell import Shell
from devtoolslib import http_server
class LinuxShell(Shell):
"""Wrapper around Mojo shell running on Linux.
Args:
executable_path: path to the shell binary
command_prefix: optional list of arguments to prepend to the shell command,
allowing e.g. to run the shell under debugger.
"""
def __init__(self, executable_path, command_prefix=None):
self.executable_path = executable_path
self.command_prefix = command_prefix if command_prefix else []
def ServeLocalDirectory(self, local_dir_path, port=0):
"""Serves the content of the local (host) directory, making it available to
the shell under the url returned by the function.
The server will run on a separate thread until the program terminates. The
call returns immediately.
Args:
local_dir_path: path to the directory to be served
port: port at which the server will be available to the shell
Returns:
The url that the shell can use to access the content of |local_dir_path|.
"""
return 'http://%s:%d/' % http_server.StartHttpServer(local_dir_path, port)
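  # Illustrative use (paths and app name are assumptions; Run is defined below):
  #   shell = LinuxShell('/path/to/mojo_shell')
  #   url = shell.ServeLocalDirectory('out/apps')
  #   exit_code = shell.Run([url + 'app.mojo'])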
def Run(self, arguments):
"""Runs the shell with given arguments until shell exits, passing the stdout
mingled with stderr produced by the shell onto the stdout.
Returns:
      Exit code returned by the shell or None if the exit code cannot be
retrieved.
"""
command = self.command_prefix + [self.executable_path] + arguments
return subprocess.call(command, stderr=subprocess.STDOUT)
def RunAndGetOutput(self, arguments):
"""Runs the shell with given arguments until shell exits.
Args:
arguments: list of arguments for the shell
Returns:
A tuple of (return_code, output). |return_code| is the exit code returned
by the shell or None if the exit code cannot be retrieved. |output| is the
stdout mingled with the stderr produced by the shell.
"""
command = self.command_prefix + [self.executable_path] + arguments
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(output, _) = p.communicate()
return p.returncode, output
| collinjackson/mojo | mojo/devtools/common/devtoolslib/linux_shell.py | Python | bsd-3-clause | 2,385 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'recipe_engine/buildbucket',
'recipe_engine/context',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/raw_io',
'recipe_engine/step',
'git',
]
def RunSteps(api):
url = 'https://chromium.googlesource.com/chromium/src.git'
# git.checkout can optionally dump GIT_CURL_VERBOSE traces to a log file,
# useful for debugging git access issues that are reproducible only on bots.
curl_trace_file = None
if api.properties.get('use_curl_trace'):
curl_trace_file = api.path['start_dir'].join('curl_trace.log')
submodule_update_force = api.properties.get('submodule_update_force', False)
submodule_update_recursive = api.properties.get('submodule_update_recursive',
True)
# You can use api.git.checkout to perform all the steps of a safe checkout.
revision = (api.buildbucket.gitiles_commit.ref or
api.buildbucket.gitiles_commit.id)
retVal = api.git.checkout(
url,
ref=revision,
recursive=True,
submodule_update_force=submodule_update_force,
set_got_revision=api.properties.get('set_got_revision'),
curl_trace_file=curl_trace_file,
remote_name=api.properties.get('remote_name'),
display_fetch_size=api.properties.get('display_fetch_size'),
file_name=api.properties.get('checkout_file_name'),
submodule_update_recursive=submodule_update_recursive,
use_git_cache=api.properties.get('use_git_cache'),
tags=api.properties.get('tags'))
assert retVal == "deadbeef", (
"expected retVal to be %r but was %r" % ("deadbeef", retVal))
# count_objects shows number and size of objects in .git dir.
api.git.count_objects(
name='count-objects',
can_fail_build=api.properties.get('count_objects_can_fail_build'),
git_config_options={'foo': 'bar'})
# Get the remote URL.
api.git.get_remote_url(
step_test_data=lambda: api.raw_io.test_api.stream_output('foo'))
api.git.get_timestamp(test_data='foo')
# You can use api.git.fetch_tags to fetch all tags from the remote
api.git.fetch_tags(api.properties.get('remote_name'))
# If you need to run more arbitrary git commands, you can use api.git itself,
# which behaves like api.step(), but automatically sets the name of the step.
with api.context(cwd=api.path['checkout']):
api.git('status')
api.git('status', name='git status can_fail_build',
can_fail_build=True)
api.git('status', name='git status cannot_fail_build',
can_fail_build=False)
# You should run git new-branch before you upload something with git cl.
api.git.new_branch('refactor') # Upstream is origin/master by default.
# And use upstream kwarg to set up different upstream for tracking.
api.git.new_branch('feature', upstream='refactor')
# You can use api.git.rebase to rebase the current branch onto another one
api.git.rebase(name_prefix='my repo', branch='origin/master',
dir_path=api.path['checkout'],
remote_name=api.properties.get('remote_name'))
if api.properties.get('cat_file', None):
step_result = api.git.cat_file_at_commit(api.properties['cat_file'],
revision,
stdout=api.raw_io.output())
if 'TestOutput' in step_result.stdout:
pass # Success!
# Bundle the repository.
api.git.bundle_create(
api.path['start_dir'].join('all.bundle'))
def GenTests(api):
yield api.test('basic')
yield api.test('basic_tags') + api.properties(tags=True)
yield api.test('basic_ref') + api.buildbucket.ci_build(git_ref='refs/foo/bar')
yield api.test('basic_branch') + api.buildbucket.ci_build(
git_ref='refs/heads/testing')
yield api.test('basic_hash') + api.buildbucket.ci_build(
revision='abcdef0123456789abcdef0123456789abcdef01', git_ref=None)
yield api.test('basic_file_name') + api.properties(checkout_file_name='DEPS')
yield api.test('basic_submodule_update_force') + api.properties(
submodule_update_force=True)
yield api.test('platform_win') + api.platform.name('win')
yield (
api.test('curl_trace_file') +
api.properties(use_curl_trace=True) +
api.buildbucket.ci_build(git_ref='refs/foo/bar')
)
yield (
api.test('can_fail_build') +
api.step_data('git status can_fail_build', retcode=1)
)
yield (
api.test('cannot_fail_build') +
api.step_data('git status cannot_fail_build', retcode=1)
)
yield (
api.test('set_got_revision') +
api.properties(set_got_revision=True)
)
yield (
api.test('rebase_failed') +
api.step_data('my repo rebase', retcode=1)
)
yield api.test('remote_not_origin') + api.properties(remote_name='not_origin')
yield (
api.test('count-objects_delta') +
api.properties(display_fetch_size=True))
yield (
api.test('count-objects_failed') +
api.step_data('count-objects', retcode=1))
yield (
api.test('count-objects_with_bad_output') +
api.step_data(
'count-objects',
stdout=api.raw_io.output(api.git.count_objects_output('xxx'))))
yield (
api.test('count-objects_with_bad_output_fails_build') +
api.step_data(
'count-objects',
stdout=api.raw_io.output(api.git.count_objects_output('xxx'))) +
api.properties(count_objects_can_fail_build=True))
yield (
api.test('cat-file_test') +
api.step_data('git cat-file abcdef12345:TestFile',
stdout=api.raw_io.output('TestOutput')) +
api.buildbucket.ci_build(revision='abcdef12345', git_ref=None) +
api.properties(cat_file='TestFile'))
yield (
api.test('git-cache-checkout') +
api.properties(use_git_cache=True))
| endlessm/chromium-browser | third_party/depot_tools/recipes/recipe_modules/git/examples/full.py | Python | bsd-3-clause | 5,942 |
from django.test import TestCase
import time
from .models import SimpleTree, MPTTTree, TBMP, TBNS
def timeit(method):
""" Measure time of method's execution.
"""
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
print '\n%r: %2.2f sec' % \
(method.__name__, te - ts)
return result
return timed
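# Example usage of the decorator (hypothetical function, shown for illustration):
#
#     @timeit
#     def build_tree():
#         ...
#
# would print something like "'build_tree': 1.23 sec" once the call returns.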
CYCLES = 8
class Benchmark(object):
@timeit
def test_creation(self):
self._create_tree()
def test_delete(self):
self._create_tree(cycles=7)
@timeit
def test_deletion():
for _ in xrange(pow(2, CYCLES) / 2):
self._delete_last()
test_deletion()
def test_get(self):
self._create_tree(cycles=7)
@timeit
def test_get_tree():
root = self._get_root()
for _ in xrange(100):
self._get_tree(root)
test_get_tree()
def _create_tree(self, cycles=CYCLES):
root = self._create_root(title='root1')
nodes = [root]
        for _ in xrange(cycles):
new_nodes = []
for node in nodes:
new_nodes.append(self._create_child(parent=node))
new_nodes.append(self._create_child(parent=node))
nodes = new_nodes
return nodes
def _create_root(self, **params):
pass
def _create_child(self, parent, **params):
pass
def _delete_last(self):
pass
def _get_root(self):
pass
def _get_tree(self, parent):
pass
class SimpleTest(TestCase, Benchmark):
def setUp(self):
print "\nSimpleTree benchmark"
def _create_root(self, **params):
return SimpleTree.objects.create(**params)
def _create_child(self, parent, **params):
return SimpleTree.objects.create(parent=parent, **params)
def _delete_last(self):
SimpleTree.objects.order_by('-id')[0].delete()
def _get_root(self):
return SimpleTree.objects.get(parent=None)
def _get_tree(self, parent):
return parent.get_tree()
class MPTTTest(TestCase, Benchmark):
def setUp(self):
print "\nMPTT benchmark"
def _create_root(self, **params):
return MPTTTree.objects.create(**params)
def _create_child(self, parent, **params):
return MPTTTree.objects.create(parent=parent, **params)
def _delete_last(self):
MPTTTree.objects.order_by('-id')[0].delete()
def _get_root(self):
return MPTTTree.objects.get(parent=None)
def _get_tree(self, parent):
return list(parent.get_ancestors()) + list(parent.get_descendants(include_self=False))
class TreeBeardMP(TestCase, Benchmark):
def setUp(self):
print "\nTreebeard MP benchmark"
def _create_root(self, **params):
return TBMP.add_root(**params)
def _create_child(self, parent, **params):
return parent.add_child(**params)
def _delete_last(self):
TBMP.objects.order_by('-id')[0].delete()
def _get_root(self):
return TBMP.get_root_nodes()[0]
def _get_tree(self, parent):
TBMP.get_tree(parent=parent)
class TreeBeardNS(TreeBeardMP):
def setUp(self):
print "\nTreebeard NS benchmark"
def _create_root(self, **params):
return TBNS.add_root(**params)
def _delete_last(self):
TBNS.objects.order_by('-id')[0].delete()
def _get_root(self):
return TBNS.get_root_nodes()[0]
def _get_tree(self, parent):
TBNS.get_tree(parent=parent)
| klen/simpletree | benchmark/main/tests.py | Python | bsd-3-clause | 3,596 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-08-28 17:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('voting', '0002_auto_20150813_2010'),
]
operations = [
migrations.CreateModel(
name='VoteToken',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ticket_code', models.CharField(max_length=255)),
('token_sent', models.DateTimeField(blank=True, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| WebCampZg/conference-web | voting/migrations/0003_votetoken.py | Python | bsd-3-clause | 880 |
import uuid
import os
import shutil
import urlparse
import re
import hashlib
from lxml import html
from PIL import Image, ImageFile
from django.conf import settings
import views
ImageFile.MAXBLOCKS = 10000000
def match_or_none(string, rx):
"""
Tries to match a regular expression and returns an integer if it can.
Otherwise, returns None.
@param string: String to match against
@type string: basestring
@param rx: compiled regular expression
@return: number or None
@rtype: int/long or None
"""
if string is None:
return None
match = rx.search(string)
if match:
return int(match.groups()[0])
return None
width_rx = re.compile(r'width\s*:\s*(\d+)(px)?')
height_rx = re.compile(r'height\s*:\s*(\d+)(px)?')
def get_dimensions(img):
"""
Attempts to get the dimensions of an image from the img tag.
It first tries to grab it from the css styles and then falls back
to looking at the attributes.
@param img: Image tag.
@type img: etree._Element
@return: width and height of the image
@rtype: (int or None, int or None)
"""
styles = img.attrib.get('style')
width = match_or_none(styles, width_rx) or img.attrib.get('width')
if isinstance(width, basestring):
width = int(width)
height = match_or_none(styles, height_rx) or img.attrib.get('height')
if isinstance(height, basestring):
height= int(height)
return width, height
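# Illustrative example (markup assumed): for an element parsed from
#   <img style="width: 300px" height="200" src="...">
# get_dimensions() returns (300, 200); the CSS style wins over the attribute
# when both provide a value.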
def get_local_path(url):
"""
Converts a url to a local path
@param url: Url to convert
@type url: basestring
@return: Local path of the url
@rtype: basestring
"""
url = urlparse.unquote(url)
local_path = settings.STATIC_ROOT + os.path.normpath(url[len(settings.STATIC_URL):])
return local_path
# `buffer` is needed since hashlib apparently isn't unicode safe
hexhash = lambda s: hashlib.md5(buffer(s)).hexdigest()
def new_rendered_path(orig_path, width, height, ext=None):
"""
Builds a new rendered path based on the original path, width, and height.
It takes a hash of the original path to prevent users from accidentally
    (or purposely) overwriting others' rendered thumbnails.
    This isn't perfect: we are assuming that the original file's contents never
    change, which is the django default. We could make this function more
    robust by hashing the file every time we save, but that has the obvious
    disadvantage of having to hash the file every time. YMMV.
@param orig_path: Path to the original image.
@type orig_path: "/path/to/file"
@param width: Desired width of the rendered image.
@type width: int or None
@param height: Desired height of the rendered image.
@type height: int or None
@param ext: Desired extension of the new image. If None, uses
the original extension.
@type ext: basestring or None
@return: Absolute path to where the rendered image should live.
@rtype: "/path/to/rendered/image"
"""
dirname = os.path.dirname(orig_path)
rendered_path = os.path.join(dirname, 'rendered')
if not os.path.exists(rendered_path):
os.mkdir(rendered_path)
hash_path = hexhash(orig_path)
if ext is None:
ext = os.path.splitext(os.path.basename(orig_path))[1]
if ext and ext[0] != u'.':
ext = u'.' + ext
name = '%s_%sx%s' % (hash_path, width, height)
return os.path.join(rendered_path, name) + ext
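# Sketch of the resulting layout (hash shortened, arguments assumed):
#   new_rendered_path('/static/img/pic.png', 200, 100)
#   -> '/static/img/rendered/<md5 of original path>_200x100.png'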
def is_rendered(path, width, height):
"""
Checks whether or not an image has been rendered to the given path
with the given dimensions
@param path: path to check
@type path: u"/path/to/image"
@param width: Desired width
@type width: int
@param height: Desired height
@type height: int
@return: Whether or not the image is correct
@rtype: bool
"""
if os.path.exists(path):
old_width, old_height = Image.open(path).size
return old_width == width and old_height == height
return False
def transcode_to_jpeg(image, path, width, height):
"""
Transcodes an image to JPEG.
@param image: Opened image to transcode to jpeg.
@type image: PIL.Image
@param path: Path to the opened image.
@type path: u"/path/to/image"
@param width: Desired width of the transcoded image.
@type width: int
@param height: Desired height of the transcoded image.
@type height: int
@return: Path to the new transcoded image.
@rtype: "/path/to/image"
"""
i_width, i_height = image.size
new_width = i_width if width is None else width
new_height = i_height if height is None else height
new_path = new_rendered_path(path, width, height, ext='jpg')
if is_rendered(new_path, new_width, new_height):
return new_path
new_image = image.resize((new_width, new_height), Image.ANTIALIAS)
new_image.save(new_path, quality=80, optimize=1)
return new_path
def re_render(path, width, height):
"""
Given an original image, width, and height, creates a thumbnailed image
of the exact dimensions given. We skip animated gifs because PIL can't
resize those automatically whereas browsers can contort them easily. We
also don't stretch images at all and return the original in that case.
@param path: Path to the original image
@type path: "/path/to/image"
@param width: Desired width
@type width: int or None
@param height: Desired height
@type height: int or None
@return: Path to the 'rendered' image.
@rtype: "/path/to/image"
"""
try:
image = Image.open(path)
except IOError:
# Probably doesn't exist or isn't an image
return path
# We have to call image.load first due to a PIL 1.1.7 bug
image.load()
if image.format == 'PNG' and getattr(settings, 'CKEDITOR_PNG_TO_JPEG', False):
pixels = reduce(lambda a,b: a*b, image.size)
# check that our entire alpha channel is set to full opaque
if image.mode == 'RGB' or image.split()[-1].histogram()[-1] == pixels:
return transcode_to_jpeg(image, path, width, height)
if image.size <= (width, height):
return path
if width is None and height is None:
return path
# We can't resize animated gifs
if image.format == 'GIF':
try:
image.seek(1)
return path
except EOFError:
# Static GIFs should throw an EOF on seek
pass
new_path = new_rendered_path(path, width, height)
if is_rendered(new_path, width, height):
return new_path
# Re-render the image, optimizing for filesize
new_image = image.resize((width, height), Image.ANTIALIAS)
new_image.save(new_path, quality=80, optimize=1)
return new_path
def get_html_tree(content):
return html.fragment_fromstring(content, create_parent='div')
def render_html_tree(tree):
return html.tostring(tree)[5:-6]
def resize_images(post_content):
"""
Goes through all images, resizing those that we know to be local to the
correct image size.
@param post_content: Raw html of the content to search for images with.
@type post_content: basestring containg HTML fragments
@return: Modified contents.
@rtype: basestring
"""
# Get tree
tree = get_html_tree(post_content)
# Get images
imgs = tree.xpath('//img[starts-with(@src, "%s")]' % settings.STATIC_URL)
for img in imgs:
orig_url = img.attrib['src']
orig_path = get_local_path(orig_url)
width, height = get_dimensions(img)
rendered_path = re_render(orig_path, width, height)
# If we haven't changed the image, move along.
if rendered_path == orig_path:
continue
# Flip to the rendered
img.attrib['data-original'] = orig_url
img.attrib['src'] = views.get_media_url(rendered_path)
# Strip of wrapping div tag
return render_html_tree(tree)
def swap_in_originals(content):
if 'data-original' not in content:
return content
tree = get_html_tree(content)
for img in tree.xpath('//img[@data-original]'):
img.attrib['src'] = img.attrib['data-original']
del img.attrib['data-original']
return render_html_tree(tree)
| ZG-Tennis/django-ckeditor | ckeditor/utils.py | Python | bsd-3-clause | 8,404 |
from django.conf import settings
from site_news.models import SiteNewsItem
def site_news(request):
"""
Inserts the currently active news items into the template context.
This ignores MAX_SITE_NEWS_ITEMS.
"""
# Grab all active items in proper date/time range.
items = SiteNewsItem.current_and_active.all()
return {'site_news_items': items}
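# To activate this (illustrative, the exact settings layout depends on the
# Django version): add "site_news.context_processors.site_news" to the template
# context processors setting; templates can then loop over site_news_items.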
| glesica/django-site-news | site_news/context_processors.py | Python | bsd-3-clause | 379 |
# -*- coding: utf-8 -*-
import logging
import re
import importlib
import django
import six
from django.contrib.sites.shortcuts import get_current_site
from django.utils.functional import lazy
from django.utils.safestring import mark_safe
from django.utils.module_loading import import_string
from django.utils.html import conditional_escape
from django.conf import settings
from django.db import models
from django.db.models import Q
from django.urls import (URLResolver as RegexURLResolver, URLPattern as RegexURLPattern, Resolver404, get_resolver,
clear_url_caches)
logger = logging.getLogger(__name__)
class NotSet(object):
""" A singleton to identify unset values (where None would have meaning) """
def __str__(self):
return "NotSet"
def __repr__(self):
return self.__str__()
NotSet = NotSet()
class Literal(object):
""" Wrap literal values so that the system knows to treat them that way """
def __init__(self, value):
self.value = value
def _pattern_resolve_to_name(pattern, path):
if django.VERSION < (2, 0):
match = pattern.regex.search(path)
else:
match = pattern.pattern.regex.search(path)
if match:
name = ""
if pattern.name:
name = pattern.name
elif hasattr(pattern, '_callback_str'):
name = pattern._callback_str
else:
name = "%s.%s" % (pattern.callback.__module__, pattern.callback.func_name)
return name
def _resolver_resolve_to_name(resolver, path):
tried = []
django1 = django.VERSION < (2, 0)
if django1:
match = resolver.regex.search(path)
else:
match = resolver.pattern.regex.search(path)
if match:
new_path = path[match.end():]
for pattern in resolver.url_patterns:
try:
if isinstance(pattern, RegexURLPattern):
name = _pattern_resolve_to_name(pattern, new_path)
elif isinstance(pattern, RegexURLResolver):
name = _resolver_resolve_to_name(pattern, new_path)
except Resolver404 as e:
if django1:
tried.extend([(pattern.regex.pattern + ' ' + t) for t in e.args[0]['tried']])
else:
tried.extend([(pattern.pattern.regex.pattern + ' ' + t) for t in e.args[0]['tried']])
else:
if name:
return name
if django1:
tried.append(pattern.regex.pattern)
else:
tried.append(pattern.pattern.regex.pattern)
raise Resolver404({'tried': tried, 'path': new_path})
def resolve_to_name(path, urlconf=None):
try:
return _resolver_resolve_to_name(get_resolver(urlconf), path)
except Resolver404:
return None
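# Example of the intended behaviour (URL name and path are assumptions): with a
# urlconf entry named "article-detail" matching "/articles/<id>/",
# resolve_to_name("/articles/42/") returns "article-detail"; unresolvable paths
# return None.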
def _replace_quot(match):
    unescape = lambda v: v.replace('&quot;', '"').replace('&amp;', '&')
return u'<%s%s>' % (unescape(match.group(1)), unescape(match.group(3)))
def escape_tags(value, valid_tags):
""" Strips text from the given html string, leaving only tags.
This functionality requires BeautifulSoup, nothing will be
done otherwise.
This isn't perfect. Someone could put javascript in here:
<a onClick="alert('hi');">test</a>
So if you use valid_tags, you still need to trust your data entry.
Or we could try:
- only escape the non matching bits
- use BeautifulSoup to understand the elements, escape everything
else and remove potentially harmful attributes (onClick).
- Remove this feature entirely. Half-escaping things securely is
very difficult, developers should not be lured into a false
sense of security.
"""
# 1. escape everything
value = conditional_escape(value)
# 2. Reenable certain tags
if valid_tags:
# TODO: precompile somewhere once?
        tag_re = re.compile(r'&lt;(\s*/?\s*(%s))(.*?\s*)&gt;' %
u'|'.join(re.escape(tag) for tag in valid_tags))
value = tag_re.sub(_replace_quot, value)
# Allow comments to be hidden
value = value.replace("<!--", "<!--").replace("-->", "-->")
return mark_safe(value)
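# Illustrative behaviour (input assumed): escape_tags('<b>hi</b><i>x</i>', ['b'])
# escapes everything and then re-enables only <b>, yielding
# '<b>hi</b>&lt;i&gt;x&lt;/i&gt;'.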
def _get_seo_content_types(seo_models):
""" Returns a list of content types from the models defined in settings
(SEO_MODELS)
"""
from django.contrib.contenttypes.models import ContentType
try:
return [ContentType.objects.get_for_model(m).id for m in seo_models]
except: # previously caught DatabaseError
# Return an empty list if this is called too early
return []
def get_seo_content_types(seo_models):
return lazy(_get_seo_content_types, list)(seo_models)
def _reload_urlconf():
"""
Reload Django URL configuration and clean caches
"""
module = importlib.import_module(settings.ROOT_URLCONF)
if six.PY2:
reload(module)
else:
importlib.reload(module)
clear_url_caches()
def register_model_in_admin(model, admin_class=None):
"""
Register model in Django admin interface
"""
from django.contrib import admin
admin.site.register(model, admin_class)
_reload_urlconf()
def create_dynamic_model(model_name, app_label='djangoseo', **attrs):
"""
Create dynamic Django model
"""
module_name = '%s.models' % app_label
default_attrs = {
'__module__': module_name,
'__dynamic__': True
}
attrs.update(default_attrs)
if six.PY2:
model_name = str(model_name)
return type(model_name, (models.Model,), attrs)
def import_tracked_models():
"""
Import models
"""
redirects_models = getattr(settings, 'SEO_TRACKED_MODELS', [])
models = []
for model_path in redirects_models:
try:
model = import_string(model_path)
models.append(model)
except ImportError as e:
logging.warning("Failed to import model from path '%s'" % model_path)
return models
def handle_seo_redirects(request):
"""
    Handle SEO redirects. Creates a Redirect instance when a matching redirect pattern exists.
:param request: Django request
"""
from .models import RedirectPattern, Redirect
if not getattr(settings, 'SEO_USE_REDIRECTS', False):
return
full_path = request.get_full_path()
current_site = get_current_site(request)
subdomain = getattr(request, 'subdomain', '')
redirect_patterns = RedirectPattern.objects.filter(
Q(site=current_site),
Q(subdomain=subdomain) | Q(all_subdomains=True)
).order_by('all_subdomains')
for redirect_pattern in redirect_patterns:
if re.match(redirect_pattern.url_pattern, full_path):
kwargs = {
'site': current_site,
'old_path': full_path,
'new_path': redirect_pattern.redirect_path,
'subdomain': redirect_pattern.subdomain,
'all_subdomains': redirect_pattern.all_subdomains
}
try:
Redirect.objects.get_or_create(**kwargs)
except Exception:
logger.warning('Failed to create redirection', exc_info=True, extra=kwargs)
break
| whyflyru/django-seo | djangoseo/utils.py | Python | bsd-3-clause | 7,375 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kitchen_sink.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| publica-io/django-publica-kitchen-sink | kitchen_sink/manage.py | Python | bsd-3-clause | 255 |
#!/usr/bin/env python3
import logging
from . import SubprocessHook
logger = logging.getLogger("barython")
class PulseAudioHook(SubprocessHook):
"""
Listen on pulseaudio events with pactl
"""
def __init__(self, cmd=["pactl", "subscribe", "-n", "barython"],
*args, **kwargs):
super().__init__(*args, **kwargs, cmd=cmd)
| Anthony25/barython | barython/hooks/audio.py | Python | bsd-3-clause | 363 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Common filesystem operations """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
import stat
import time
import errno
import ntpath
import shutil
import tempfile
import warnings
import posixpath
import contextlib
import subprocess
from qisys import ui
try:
from xdg.BaseDirectory import xdg_cache_home, xdg_config_home, xdg_data_home
except ImportError:
xdg_config_home = os.path.expanduser("~/.config")
xdg_cache_home = os.path.expanduser("~/.cache")
xdg_data_home = os.path.expanduser("~/.local/share")
CONFIG_PATH = xdg_config_home
CACHE_PATH = xdg_cache_home
SHARE_PATH = xdg_data_home
def set_home(home):
""" Set Home """
# This module should be refactored into object to avoid the anti-pattern global statement
global CONFIG_PATH, CACHE_PATH, SHARE_PATH
CONFIG_PATH = os.path.join(home, "config")
CACHE_PATH = os.path.join(home, "cache")
SHARE_PATH = os.path.join(home, "share")
def get_config_path(*args):
"""
Get a config path to read or write some configuration.
:param args: a list of subfolders. Those will be created when needed
"""
return get_path(CONFIG_PATH, *args)
def get_cache_path(*args):
"""
Get a config path to read or write some cached data
:param args: a list of subfolders. Those will be created when needed
"""
return get_path(CACHE_PATH, *args)
def get_share_path(*args):
"""
Get a config path to read or write some persistent data
:param args: a list of subfolders. Those will be created when needed
"""
return get_path(SHARE_PATH, *args)
def get_path(*args):
""" Helper for get_*_path methods """
full_path = os.path.join(*args)
to_make = os.path.dirname(full_path)
mkdir(to_make, recursive=True)
full_path = to_native_path(full_path)
return full_path
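# Illustrative call (XDG default location assumed):
#   get_config_path("qi", "worktree.xml")
# returns something like "~/.config/qi/worktree.xml" and creates the
# "~/.config/qi" directory if it does not already exist.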
def username():
""" Get the current user name """
if os.name != 'nt':
import pwd
uid = os.getuid() # pylint:disable=no-member
pw_info = pwd.getpwuid(uid)
if pw_info:
return pw_info.pw_name
_username = os.environ.get("USERNAME")
if _username:
return _username
return None
def mkdir(dest_dir, recursive=False):
""" Recursive mkdir (do not fail if file exists) """
try:
if recursive:
os.makedirs(dest_dir)
else:
os.mkdir(dest_dir)
except OSError as exc:
if exc.errno != 17:
raise
# Directory already exists -> no exception
def ln(src, dst, symlink=True):
""" ln (do not fail if file exists) """
try:
if symlink:
os.symlink(src, dst) # pylint:disable=no-member
else:
raise NotImplementedError
except OSError as exc:
if exc.errno != 17:
raise
def write_file_if_different(data, out_path, mode="w"):
""" Write the data to out_path if the content is different """
try:
with open(out_path, "r") as outr:
out_prev = outr.read()
if out_prev == data:
ui.debug("skipping write to %s: same content" % (out_path))
return
except Exception:
pass
with open(out_path, mode) as out_file:
out_file.write(data)
def configure_file__legacy(in_path, out_path, copy_only=False,
*args, **kwargs): # pylint:disable=keyword-arg-before-vararg
"""
Configure a file.
:param in_path: input file
:param out_path: output file
The out_path needs not to exist, missing leading directories will
be created if necessary.
If copy_only is True, the contents will be copied "as is".
If not, we will use the args and kwargs parameter as in::
in_content.format(*args, **kwargs)
"""
# This function seems to be never called, and has been renamed with __legacy suffix (2020-02-07)
# If nobody complains, remove this function in the next release
warnings.warn(
"Deprecated function: "
"This function seems to be never called, and has been renamed with __legacy suffix (2020-02-07)\n"
"If nobody complains, remove this function in the next release, else, deals with its bad args/kwargs signature",
DeprecationWarning)
mkdir(os.path.dirname(os.path.abspath(out_path)), recursive=True)
with open(in_path, "r") as in_file:
in_content = in_file.read()
if copy_only:
out_content = in_content
else:
out_content = in_content.format(*args, **kwargs)
write_file_if_different(out_content, out_path)
def _copy_link(src, dest, quiet):
""" Copy Link """
if not os.path.islink(src):
raise Exception("%s is not a link!" % src)
target = os.readlink(src) # pylint:disable=no-member
# remove existing stuff
if os.path.lexists(dest):
rm(dest)
if sys.stdout.isatty() and not quiet:
print("-- Installing %s -> %s" % (dest, target))
to_make = os.path.dirname(dest)
mkdir(to_make, recursive=True)
os.symlink(target, dest) # pylint:disable=no-member
def _handle_dirs(src, dest, root, directories, filter_fun, quiet):
""" Helper function used by install() """
installed = list()
rel_root = os.path.relpath(root, src)
    # To avoid filtering './' stuff
if rel_root == ".":
rel_root = ""
new_root = os.path.join(dest, rel_root)
for directory in directories:
to_filter = os.path.join(rel_root, directory)
if not filter_fun(to_filter):
continue
dsrc = os.path.join(root, directory)
ddest = os.path.join(new_root, directory)
if os.path.islink(dsrc):
_copy_link(dsrc, ddest, quiet)
installed.append(directory)
else:
if os.path.lexists(ddest) and not os.path.isdir(ddest):
raise Exception("Expecting a directory but found a file: %s" % ddest)
mkdir(ddest, recursive=True)
return installed
def _handle_files(src, dest, root, files, filter_fun, quiet):
""" Helper function used by install() """
installed = list()
rel_root = os.path.relpath(root, src)
if rel_root == ".":
rel_root = ""
new_root = os.path.join(dest, rel_root)
for f in files:
if not filter_fun(os.path.join(rel_root, f)):
continue
fsrc = os.path.join(root, f)
fdest = os.path.join(new_root, f)
rel_path = os.path.join(rel_root, f)
if os.path.islink(fsrc):
mkdir(new_root, recursive=True)
_copy_link(fsrc, fdest, quiet)
installed.append(rel_path)
else:
if os.path.lexists(fdest) and os.path.isdir(fdest):
raise Exception("Expecting a file but found a directory: %s" % fdest)
if not quiet:
print("-- Installing %s" % fdest.encode('ascii', "ignore"))
mkdir(new_root, recursive=True)
# We do not want to fail if dest exists but is read only
# (following what `install` does, but not what `cp` does)
rm(fdest)
shutil.copy(fsrc, fdest)
installed.append(rel_path)
return installed
def install(src, dest, filter_fun=None, quiet=False):
"""
Install a directory or a file to a destination.
If filter_fun is not None, then the file will only be
installed if filter_fun(relative/path/to/file) returns True.
If ``dest`` does not exist, it will be created first.
When installing files, if the destination already exists,
it will be removed first, then overwritten by the new file.
This function will preserve relative symlinks between directories,
used for instance in Mac frameworks::
|__ Versions
|__ Current -> 4.0
|__ 4 -> 4.0
|__ 4.0
Return the list of files installed (with relative paths)
"""
installed = list()
# FIXME: add a `safe mode` ala install?
if not os.path.exists(src):
mess = "Could not install '%s' to '%s'\n" % (src, dest)
mess += '%s does not exist' % src
raise Exception(mess)
src = to_native_path(src, normcase=False)
dest = to_native_path(dest, normcase=False)
ui.debug("Installing", src, "->", dest)
if filter_fun is None:
def no_filter_fun(_unused):
""" Filter Function Always True """
return True
filter_fun = no_filter_fun
if os.path.isdir(src):
if src == dest:
raise Exception("source and destination are the same directory")
for (root, dirs, files) in os.walk(src):
dirs = _handle_dirs(src, dest, root, dirs, filter_fun, quiet)
files = _handle_files(src, dest, root, files, filter_fun, quiet)
installed.extend(files)
else:
# Emulate posix `install' behavior:
# if dest is a dir, install in the directory, else
# simply copy the file.
if os.path.isdir(dest):
dest = os.path.join(dest, os.path.basename(src))
if src == dest:
raise Exception("source and destination are the same file")
mkdir(os.path.dirname(dest), recursive=True)
if sys.stdout.isatty() and not quiet:
print("-- Installing %s" % dest)
# We do not want to fail if dest exists but is read only
# (following what `install` does, but not what `cp` does)
rm(dest)
shutil.copy(src, dest)
installed.append(os.path.basename(src))
return installed
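# Illustrative usage sketch, not part of the original qisys module: install a
# tree while skipping VCS metadata through filter_fun. The paths are examples only.
def _example_install_filtered(src="/tmp/example-src", dest="/tmp/example-dest"):
    """ Copy src to dest, skipping anything under a .git directory """
    def not_in_git(rel_path):
        return ".git" not in rel_path.split(os.path.sep)
    return install(src, dest, filter_fun=not_in_git, quiet=True)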
def safe_copy(src, dest):
"""
Copy a source file to a destination but
do not overwrite dest if it is more recent than src
Create any missing directories when necessary
If dest is a directory, src will be copied inside dest.
"""
if os.path.isdir(dest):
dest = os.path.join(dest, os.path.basename(src))
if not up_to_date(dest, src):
shutil.copy(src, dest)
def up_to_date(output_path, input_path):
"""" Return True if output_path exists and is more recent than input_path """
if not os.path.exists(output_path):
return False
out_mtime = os.stat(output_path).st_mtime
in_mtime = os.stat(input_path).st_mtime
return out_mtime > in_mtime
def copy_git_src(src, dest):
"""
Copy a source to a destination but only copy the files under version control.
Assumes that ``src`` is inside a git worktree
"""
process = subprocess.Popen(["git", "ls-files", "."], cwd=src,
stdout=subprocess.PIPE)
(out, _) = process.communicate()
for filename in out.splitlines():
src_file = os.path.join(src, filename.decode('ascii'))
dest_file = os.path.join(dest, filename.decode('ascii'))
install(src_file, dest_file, quiet=True)
def rm(name):
"""
This one can take a file or a directory.
Contrary to shutil.remove or os.remove, it:
* won't fail if the directory does not exist
* won't fail if the directory contains read-only files
* won't fail if the file does not exist
Please avoid using shutil.rmtree ...
"""
if not os.path.lexists(name):
return
if os.path.isdir(name) and not os.path.islink(name):
ui.debug("Removing directory:", name)
rmtree(name.encode('ascii', "ignore"))
else:
ui.debug("Removing", name)
os.remove(name)
def rmtree(path):
"""
shutil.rmtree() on steroids.
Taken from gclient source code (BSD license)
Recursively removes a directory, even if it's marked read-only.
shutil.rmtree() doesn't work on Windows if any of the files or directories
are read-only, which svn repositories and some .svn files are. We need to
be able to force the files to be writable (i.e., deletable) as we traverse
the tree.
Even with all this, Windows still sometimes fails to delete a file, citing
a permission error (maybe something to do with antivirus scans or disk
indexing). The best suggestion any of the user forums had was to wait a
bit and try again, so we do that too. It's hand-waving, but sometimes it
works. :/
On POSIX systems, things are a little bit simpler. The modes of the files
    to be deleted don't matter; only the modes of the directories containing
them are significant. As the directory tree is traversed, each directory
has its mode set appropriately before descending into it. This should
result in the entire tree being removed, with the possible exception of
``path`` itself, because nothing attempts to change the mode of its parent.
Doing so would be hazardous, as it's not a directory slated for removal.
In the ordinary case, this is not a problem: for our purposes, the user
will never lack write permission on ``path``'s parent.
"""
if not os.path.exists(path):
return
if os.path.islink(path) or not os.path.isdir(path):
raise Exception('Called rmtree(%s) in non-directory' % path)
if sys.platform == 'win32':
# Some people don't have the APIs installed. In that case we'll do without.
win32api = None
win32con = None
try:
import win32api
import win32con
except ImportError:
pass
else:
# On POSIX systems, we need the x-bit set on the directory to access it,
# the r-bit to see its contents, and the w-bit to remove files from it.
        # The actual modes of the files within the directory are irrelevant.
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
def remove(func, subpath):
""" Remove """
if sys.platform == 'win32':
os.chmod(subpath, stat.S_IWRITE)
if win32api and win32con:
win32api.SetFileAttributes(subpath, win32con.FILE_ATTRIBUTE_NORMAL)
try:
func(subpath)
except OSError as e:
if e.errno != errno.EACCES or sys.platform != 'win32':
raise
# Failed to delete, try again after a 100ms sleep.
time.sleep(0.1)
func(subpath)
for fn in os.listdir(path):
# If fullpath is a symbolic link that points to a directory, isdir will
# be True, but we don't want to descend into that as a directory, we just
# want to remove the link. Check islink and treat links as ordinary files
# would be treated regardless of what they reference.
fullpath = os.path.join(path, fn)
if os.path.islink(fullpath) or not os.path.isdir(fullpath):
remove(os.remove, fullpath)
else:
# Recurse.
rmtree(fullpath)
remove(os.rmdir, path)
def mv(src, dest):
""" Move a file into a directory, but do not crash if dest/src exists """
if src == dest:
return
if os.path.isdir(dest):
dest = os.path.join(dest, os.path.basename(src))
if os.path.exists(dest):
rm(dest)
ui.debug(src, "->", dest)
shutil.move(src, dest)
def ls_r(directory):
"""
Returns a sorted list of all the files present in a directory,
relative to this directory.
For instance, with::
foo
|__ eggs
| |__ c
| |__ d
|__ empty
|__ spam
|__a
|__b
ls_r(foo) returns:
["eggs/c", "eggs/d", "empty/", "spam/a", "spam/b"]
"""
res = list()
for root, dirs, files in os.walk(directory):
new_root = os.path.relpath(root, directory)
if new_root == "." and not files:
continue
if new_root == "." and files:
res.extend(files)
continue
if not files and not dirs:
res.append(new_root + os.path.sep)
continue
for f in files:
res.append(os.path.join(new_root, f))
return sorted(res)
def which(program):
"""
find program in the environment PATH
:return: path to program if found, None otherwise
"""
warnings.warn("qisys.sh.which is deprecated, "
"use qisys.command.find_program instead")
from qisys.command import find_program
return find_program(program)
def to_posix_path(path, fix_drive=False):
"""
Returns a POSIX path from a DOS path
:param fix_drive: if True, will replace c: by /c/ (ala mingw)
"""
res = os.path.expanduser(path)
res = os.path.abspath(res)
    res = res.replace(ntpath.sep, posixpath.sep)
if fix_drive:
(drive, rest) = os.path.splitdrive(res)
letter = drive[0]
return "/" + letter + rest
return res
def to_dos_path(path):
"""
Return a DOS path from a "windows with /" path.
Useful because people sometimes use forward slash in
environment variable, for instance
"""
res = path.replace(posixpath.sep, ntpath.sep)
return res
def to_native_path(path, normcase=True):
"""
Return an absolute, native path from a path,
:param normcase: make sure the path is all lower-case on
case-insensitive filesystems
"""
path = os.path.expanduser(path)
if normcase:
path = os.path.normcase(path)
path = os.path.normpath(path)
path = os.path.abspath(path)
path = os.path.realpath(path)
if sys.platform.startswith("win"):
path = to_dos_path(path)
return path
def is_path_inside(a, b):
"""
Returns True if a is inside b
>>> is_path_inside("foo/bar", "foo")
True
>>> is_path_inside("gui/bar/libfoo", "lib")
False
"""
a = to_native_path(a)
b = to_native_path(b)
a_split = a.split(os.path.sep)
b_split = b.split(os.path.sep)
if len(a_split) < len(b_split):
return False
for (a_part, b_part) in zip(a_split, b_split):
if a_part != b_part:
return False
return True
def is_empty(path):
""" Check if a path is empty """
return os.listdir(path) == list()
class TempDir(object):
"""
This is a nice wrapper around tempfile module.
Usage::
with TempDir("foo-bar") as temp_dir:
subdir = os.path.join(temp_dir, "subdir")
do_foo(subdir)
This piece of code makes sure that:
* a temporary directory named temp_dir has been
created (guaranteed to exist, be empty, and writeable)
* the directory will be removed when the scope of
temp_dir has ended unless an exception has occurred
and DEBUG environment variable is set.
"""
def __init__(self, name="tmp"):
""" TempDir Init """
self._temp_dir = tempfile.mkdtemp(prefix=name + "-")
def __enter__(self):
""" Enter """
return self._temp_dir
def __exit__(self, _type, value, tb):
""" Exit """
if os.environ.get("DEBUG"):
if tb is not None:
print("==")
print("Not removing ", self._temp_dir)
print("==")
return
rm(self._temp_dir)
@contextlib.contextmanager
def change_cwd(directory):
""" Change the current working dir """
if not os.path.exists(directory):
mess = "Cannot change working dir to '%s'\n" % directory
mess += "This path does not exist"
raise Exception(mess)
previous_cwd = os.getcwd()
os.chdir(directory)
yield
os.chdir(previous_cwd)
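# Illustrative usage sketch, not part of the original qisys module: run code
# from another directory and automatically restore the previous cwd afterwards.
def _example_change_cwd():
    """ Return the cwd as seen from inside the system temp directory """
    with change_cwd(tempfile.gettempdir()):
        return os.getcwd()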
def is_runtime(filename):
""" Filter function to only install runtime components of packages """
# FIXME: this looks like a hack.
# Maybe a user-generated MANIFEST at the root of the package path
# would be better?
basedir = filename.split(os.path.sep)[0]
if filename.startswith("bin") and sys.platform.startswith("win"):
return filename.endswith(".exe") or filename.endswith(".dll")
if filename.startswith("lib"):
is_lib_prefixed_runtime = not filename.endswith((".a", ".lib", ".la", ".pc"))
return is_lib_prefixed_runtime
if filename.startswith(os.path.join("share", "cmake")) or \
filename.startswith(os.path.join("share", "man")):
return False
if basedir == "include":
# Usually runtime dir names aren't include, but there is an exception for python:
return filename.endswith("pyconfig.h")
# True by default: better have too much stuff than not enough
# That includes these known cases:
# * filename.startswith("bin") but not sys.platform.startswith("win")
# * basedir == "share"
# * basedir.endswith(".framework")
return True
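# Illustrative usage sketch, not part of the original qisys module: is_runtime
# is designed to be passed as filter_fun to install() when deploying a package.
def _example_install_runtime(src="/tmp/example-package", dest="/tmp/example-rootfs"):
    """ Install only the runtime parts of a package tree """
    return install(src, dest, filter_fun=is_runtime, quiet=True)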
def broken_symlink(file_path):
""" Returns True if the file is a broken symlink """
return os.path.lexists(file_path) and not os.path.exists(file_path)
def is_binary(file_path):
""" Returns True if the file is binary """
with open(file_path, 'rb') as fp:
data = fp.read(1024)
if not data:
return False
if b'\0' in data:
return True
return False
def is_executable_binary(file_path):
"""
Returns true if the file:
* is executable
* is a binary (i.e not a script)
"""
if not os.path.isfile(file_path):
return False
if not os.access(file_path, os.X_OK):
return False
return is_binary(file_path)
class PreserveFileMetadata(object):
""" Preserve file metadata (permissions and times) """
def __init__(self, path):
""" Preserve file metadata of 'path' """
self.path = path
self.time = None
self.mode = None
def __enter__(self):
""" Enter method saving metadata """
st = os.stat(self.path)
self.time = (st.st_atime, st.st_mtime)
self.mode = st.st_mode
def __exit__(self, _type, value, tb):
""" Exit method restoring metadata """
os.chmod(self.path, self.mode)
os.utime(self.path, self.time)
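# Illustrative usage sketch, not part of the original qisys module: edit a file
# in place while keeping its original permissions and timestamps. The path is an
# example only.
def _example_preserve_metadata(path="/tmp/example.conf"):
    """ Append to a file without disturbing its mode and mtime """
    with PreserveFileMetadata(path):
        with open(path, "a") as fp:
            fp.write("# appended by example\n")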
| aldebaran/qibuild | python/qisys/sh.py | Python | bsd-3-clause | 22,082 |
from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("SVR_rbf" , "freidman1" , "db2")
| antoinecarme/sklearn2sql_heroku | tests/regression/freidman1/ws_freidman1_SVR_rbf_db2_code_gen.py | Python | bsd-3-clause | 121 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-04 05:22
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Policy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('news_duration_date_start', models.DateTimeField(blank=True, null=True)),
('news_duration_date_end', models.DateTimeField(blank=True, null=True)),
('contests_duration_date_start', models.DateTimeField(blank=True, null=True)),
('contests_duration_date_end', models.DateTimeField(blank=True, null=True)),
('max_answers_per_question', models.PositiveSmallIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)])),
('map_radius', models.PositiveSmallIntegerField(default=500, validators=[django.core.validators.MinValueValidator(1)])),
('admin_email', models.CharField(default='[email protected]', max_length=100)),
('messages_new_account', models.TextField(blank=True, null=True)),
('messages_new_contest', models.TextField(blank=True, null=True)),
('messages_new_loyalty_item', models.TextField(blank=True, null=True)),
('messages_winner', models.TextField(blank=True, null=True)),
('last_update_datetime', models.DateTimeField(blank=True, null=True)),
('claiming_method', models.CharField(blank=True, max_length=200, null=True)),
('country', models.CharField(blank=True, choices=[('indonesia', 'Indonesia'), ('malaysia', 'Malaysia'), ('philippines', 'Philippines'), ('singapore', 'Singapore')], default='Philippines', max_length=15)),
('salesrep_no', models.CharField(blank=True, max_length=200, null=True)),
('last_update_by_author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'cms_policy',
'verbose_name_plural': 'Policies',
},
),
]
| acercado/jd-ph-cms | jd-ph-cms/policies/migrations/0001_initial.py | Python | bsd-3-clause | 2,519 |
from __future__ import absolute_import
# What follows is part of a hack to make control breaking work on windows even
# if scipy.stats ims imported. See:
# http://stackoverflow.com/questions/15457786/ctrl-c-crashes-python-after-importing-scipy-stats
import sys
import os
import imp
import ctypes
if sys.platform == 'win32':
basepath = imp.find_module('numpy')[1]
ctypes.CDLL(os.path.join(basepath, 'core', 'libmmd.dll'))
ctypes.CDLL(os.path.join(basepath, 'core', 'libifcoremd.dll'))
from .adadelta import Adadelta
from .adam import Adam
from .asgd import Asgd
from .bfgs import Bfgs, Lbfgs, Sbfgs
from .cg import ConjugateGradient, NonlinearConjugateGradient
from .gd import GradientDescent
from .nes import Xnes
from .rmsprop import RmsProp
from .rprop import Rprop
from .smd import Smd
from .radagrad import Radagrad
from .adagrad import Adagrad
from .adagrad_full import AdagradFull
| gabobert/climin | climin/__init__.py | Python | bsd-3-clause | 899 |
#The DF of a tidal stream
import copy
import numpy
import multiprocessing
import scipy
from scipy import special, interpolate, integrate
if int(scipy.__version__.split('.')[0]) == 0 \
        and int(scipy.__version__.split('.')[1]) < 10: #pragma: no cover
from scipy.maxentropy import logsumexp
else:
from scipy.misc import logsumexp
from galpy.orbit import Orbit
from galpy.util import bovy_coords, fast_cholesky_invert, \
bovy_conversion, multi, bovy_plot, stable_cho_factor, bovy_ars
import warnings
from galpy.util import galpyWarning
_INTERPDURINGSETUP= True
_USEINTERP= True
_USESIMPLE= True
_labelDict= {'x': r'$X$',
'y': r'$Y$',
'z': r'$Z$',
'r': r'$R$',
'phi': r'$\phi$',
'vx':r'$V_X$',
'vy':r'$V_Y$',
'vz':r'$V_Z$',
'vr':r'$V_R$',
'vt':r'$V_T$',
'll':r'$\mathrm{Galactic\ longitude\, (deg)}$',
'bb':r'$\mathrm{Galactic\ latitude\, (deg)}$',
'dist':r'$\mathrm{distance\, (kpc)}$',
'pmll':r'$\mu_l\,(\mathrm{mas\,yr}^{-1})$',
'pmbb':r'$\mu_b\,(\mathrm{mas\,yr}^{-1})$',
'vlos':r'$V_{\mathrm{los}}\,(\mathrm{km\,s}^{-1})$'}
class streamdf(object):
"""The DF of a tidal stream"""
def __init__(self,sigv,progenitor=None,pot=None,aA=None,
tdisrupt=None,sigMeanOffset=6.,leading=True,
sigangle=None,
deltaAngleTrack=None,nTrackChunks=None,nTrackIterations=None,
progIsTrack=False,
Vnorm=220.,Rnorm=8.,
R0=8.,Zsun=0.025,vsun=[-11.1,8.*30.24,7.25],
multi=None,interpTrack=_INTERPDURINGSETUP,
useInterp=_USEINTERP,nosetup=False):
"""
NAME:
__init__
PURPOSE:
           Initialize the DF of a tidal stream
INPUT:
sigv - radial velocity dispersion of the progenitor
tdisrupt= (5 Gyr) time since start of disruption (natural units)
leading= (True) if True, model the leading part of the stream
if False, model the trailing part
progenitor= progenitor orbit as Orbit instance (will be re-integrated, so don't bother integrating the orbit before)
progIsTrack= (False) if True, then the progenitor (x,v) is actually the (x,v) of the stream track at zero angle separation; useful when initializing with an orbit fit; the progenitor's position will be calculated
pot= Potential instance or list thereof
aA= actionAngle instance used to convert (x,v) to actions
sigMeanOffset= (6.) offset between the mean of the frequencies
and the progenitor, in units of the largest
eigenvalue of the frequency covariance matrix
(along the largest eigenvector), should be positive;
to model the trailing part, set leading=False
sigangle= (sigv/122/[1km/s]=1.8sigv in natural coordinates)
estimate of the angle spread of the debris initially
deltaAngleTrack= (None) angle to estimate the stream track over (rad)
nTrackChunks= (floor(deltaAngleTrack/0.15)+1) number of chunks to divide the progenitor track in
nTrackIterations= Number of iterations to perform when establishing the track; each iteration starts from a previous approximation to the track in (x,v) and calculates a new track based on the deviation between the previous track and the desired track in action-angle coordinates; if not set, an appropriate value is determined based on the magnitude of the misalignment between stream and orbit, with larger numbers of iterations for larger misalignments
interpTrack= (might change), interpolate the stream track while
setting up the instance (can be done by hand by
calling self._interpolate_stream_track() and
self._interpolate_stream_track_aA())
useInterp= (might change), use interpolation by default when
calculating approximated frequencies and angles
nosetup= (False) if True, don't setup the stream track and anything
else that is expensive
multi= (None) if set, use multi-processing
Coordinate transformation inputs:
Vnorm= (220) circular velocity to normalize velocities with
Rnorm= (8) Galactocentric radius to normalize positions with
R0= (8) Galactocentric radius of the Sun (kpc)
Zsun= (0.025) Sun's height above the plane (kpc)
vsun= ([-11.1,241.92,7.25]) Sun's motion in cylindrical coordinates (vR positive away from center)
OUTPUT:
object
HISTORY:
2013-09-16 - Started - Bovy (IAS)
2013-11-25 - Started over - Bovy (IAS)
"""
self._sigv= sigv
if tdisrupt is None:
self._tdisrupt= 5./bovy_conversion.time_in_Gyr(Vnorm,Rnorm)
else:
self._tdisrupt= tdisrupt
self._sigMeanOffset= sigMeanOffset
if pot is None: #pragma: no cover
raise IOError("pot= must be set")
self._pot= pot
self._aA= aA
if not self._aA._pot == self._pot:
raise IOError("Potential in aA does not appear to be the same as given potential pot")
if (multi is True): #if set to boolean, enable cpu_count processes
self._multi= multiprocessing.cpu_count()
else:
self._multi= multi
self._progenitor_setup(progenitor,leading)
self._offset_setup(sigangle,leading,deltaAngleTrack)
# if progIsTrack, calculate the progenitor that gives a track that is approximately the given orbit
if progIsTrack:
self._setup_progIsTrack()
self._setup_coord_transform(Rnorm,Vnorm,R0,Zsun,vsun,progenitor)
#Determine the stream track
if not nosetup:
self._determine_nTrackIterations(nTrackIterations)
self._determine_stream_track(nTrackChunks)
self._useInterp= useInterp
if interpTrack or self._useInterp:
self._interpolate_stream_track()
self._interpolate_stream_track_aA()
self.calc_stream_lb()
self._determine_stream_spread()
return None
def _progenitor_setup(self,progenitor,leading):
"""The part of the setup relating to the progenitor's orbit"""
#Progenitor orbit: Calculate actions, frequencies, and angles for the progenitor
self._progenitor= progenitor() #call to get new Orbit
# Make sure we do not use physical coordinates
self._progenitor.turn_physical_off()
acfs= self._aA.actionsFreqsAngles(self._progenitor,maxn=3,
_firstFlip=(not leading))
self._progenitor_jr= acfs[0][0]
self._progenitor_lz= acfs[1][0]
self._progenitor_jz= acfs[2][0]
self._progenitor_Omegar= acfs[3]
self._progenitor_Omegaphi= acfs[4]
self._progenitor_Omegaz= acfs[5]
self._progenitor_Omega= numpy.array([acfs[3],acfs[4],acfs[5]]).reshape(3)
self._progenitor_angler= acfs[6]
self._progenitor_anglephi= acfs[7]
self._progenitor_anglez= acfs[8]
self._progenitor_angle= numpy.array([acfs[6],acfs[7],acfs[8]]).reshape(3)
#Calculate dO/dJ Jacobian at the progenitor
self._dOdJp= calcaAJac(self._progenitor._orb.vxvv,
self._aA,dxv=None,dOdJ=True,
_initacfs=acfs)
self._dOdJpEig= numpy.linalg.eig(self._dOdJp)
return None
def _offset_setup(self,sigangle,leading,deltaAngleTrack):
"""The part of the setup related to calculating the stream/progenitor offset"""
#From the progenitor orbit, determine the sigmas in J and angle
self._sigjr= (self._progenitor.rap()-self._progenitor.rperi())/numpy.pi*self._sigv
self._siglz= self._progenitor.rperi()*self._sigv
self._sigjz= 2.*self._progenitor.zmax()/numpy.pi*self._sigv
#Estimate the frequency covariance matrix from a diagonal J matrix x dOdJ
self._sigjmatrix= numpy.diag([self._sigjr**2.,
self._siglz**2.,
self._sigjz**2.])
self._sigomatrix= numpy.dot(self._dOdJp,
numpy.dot(self._sigjmatrix,self._dOdJp.T))
#Estimate angle spread as the ratio of the largest to the middle eigenvalue
self._sigomatrixEig= numpy.linalg.eig(self._sigomatrix)
self._sigomatrixEigsortIndx= numpy.argsort(self._sigomatrixEig[0])
self._sortedSigOEig= sorted(self._sigomatrixEig[0])
if sigangle is None:
self._sigangle= self._sigv*1.8
else:
self._sigangle= sigangle
self._sigangle2= self._sigangle**2.
self._lnsigangle= numpy.log(self._sigangle)
#Estimate the frequency mean as lying along the direction of the largest eigenvalue
self._dsigomeanProgDirection= self._sigomatrixEig[1][:,numpy.argmax(self._sigomatrixEig[0])]
self._progenitor_Omega_along_dOmega= \
numpy.dot(self._progenitor_Omega,self._dsigomeanProgDirection)
#Make sure we are modeling the correct part of the stream
self._leading= leading
self._sigMeanSign= 1.
if self._leading and self._progenitor_Omega_along_dOmega < 0.:
self._sigMeanSign= -1.
elif not self._leading and self._progenitor_Omega_along_dOmega > 0.:
self._sigMeanSign= -1.
self._progenitor_Omega_along_dOmega*= self._sigMeanSign
self._sigomean= self._progenitor_Omega\
+self._sigMeanOffset*self._sigMeanSign\
*numpy.sqrt(numpy.amax(self._sigomatrixEig[0]))\
*self._dsigomeanProgDirection
#numpy.dot(self._dOdJp,
# numpy.array([self._sigjr,self._siglz,self._sigjz]))
self._dsigomeanProg= self._sigomean-self._progenitor_Omega
self._meandO= self._sigMeanOffset\
*numpy.sqrt(numpy.amax(self._sigomatrixEig[0]))
#Store cholesky of sigomatrix for fast evaluation
self._sigomatrixNorm=\
numpy.sqrt(numpy.sum(self._sigomatrix**2.))
self._sigomatrixinv, self._sigomatrixLogdet= \
fast_cholesky_invert(self._sigomatrix/self._sigomatrixNorm,
tiny=10.**-15.,logdet=True)
self._sigomatrixinv/= self._sigomatrixNorm
deltaAngleTrackLim = (self._sigMeanOffset+4.) * numpy.sqrt(
self._sortedSigOEig[2]) * self._tdisrupt
if (deltaAngleTrack is None):
deltaAngleTrack = deltaAngleTrackLim
else:
if (deltaAngleTrack > deltaAngleTrackLim):
warnings.warn("WARNING: angle range large compared to plausible value.", galpyWarning)
self._deltaAngleTrack= deltaAngleTrack
return None
def _setup_coord_transform(self,Rnorm,Vnorm,R0,Zsun,vsun,progenitor):
#Set the coordinate-transformation parameters; check that these do not conflict with those in the progenitor orbit object; need to use the original, since this objects _progenitor has physical turned off
if progenitor._roSet \
and (numpy.fabs(Rnorm-progenitor._orb._ro) > 10.**-.8 \
or numpy.fabs(R0-progenitor._orb._ro) > 10.**-8.):
warnings.warn("Warning: progenitor's ro does not agree with streamdf's Rnorm and R0; this may have unexpected consequences when projecting into observables", galpyWarning)
if progenitor._voSet \
and numpy.fabs(Vnorm-progenitor._orb._vo) > 10.**-8.:
warnings.warn("Warning: progenitor's vo does not agree with streamdf's Vnorm; this may have unexpected consequences when projecting into observables", galpyWarning)
if (progenitor._roSet or progenitor._voSet) \
and numpy.fabs(Zsun-progenitor._orb._zo) > 10.**-8.:
warnings.warn("Warning: progenitor's zo does not agree with streamdf's Zsun; this may have unexpected consequences when projecting into observables", galpyWarning)
if (progenitor._roSet or progenitor._voSet) \
and numpy.any(numpy.fabs(vsun-numpy.array([0.,Vnorm,0.])\
-progenitor._orb._solarmotion) > 10.**-8.):
warnings.warn("Warning: progenitor's solarmotion does not agree with streamdf's vsun (after accounting for Vnorm); this may have unexpected consequences when projecting into observables", galpyWarning)
self._Vnorm= Vnorm
self._Rnorm= Rnorm
self._R0= R0
self._Zsun= Zsun
self._vsun= vsun
return None
def _setup_progIsTrack(self):
"""If progIsTrack, the progenitor orbit that was passed to the
streamdf initialization is the track at zero angle separation;
this routine computes an actual progenitor position that gives
the desired track given the parameters of the streamdf"""
# We need to flip the sign of the offset, to go to the progenitor
self._sigMeanSign*= -1.
# Use _determine_stream_track_single to calculate the track-progenitor
# offset at zero angle separation
prog_stream_offset=\
_determine_stream_track_single(self._aA,
self._progenitor,
0., #time = 0
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
self.meanOmega,
0.) #angle = 0
# Setup the new progenitor orbit
progenitor= Orbit(prog_stream_offset[3])
# Flip the offset sign again
self._sigMeanSign*= -1.
# Now re-do the previous setup
self._progenitor_setup(progenitor,self._leading)
self._offset_setup(self._sigangle,self._leading,
self._deltaAngleTrack)
return None
def misalignment(self,isotropic=False):
"""
NAME:
misalignment
PURPOSE:
calculate the misalignment between the progenitor's frequency
and the direction along which the stream disrupts
INPUT:
isotropic= (False), if True, return the misalignment assuming an isotropic action distribution
OUTPUT:
misalignment in degree
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
if isotropic:
dODir= self._dOdJpEig[1][:,numpy.argmax(numpy.fabs(self._dOdJpEig[0]))]
else:
dODir= self._dsigomeanProgDirection
out= numpy.arccos(numpy.sum(self._progenitor_Omega*dODir)/numpy.sqrt(numpy.sum(self._progenitor_Omega**2.)))/numpy.pi*180.
if out > 90.: return out-180.
else: return out
def freqEigvalRatio(self,isotropic=False):
"""
NAME:
freqEigvalRatio
PURPOSE:
calculate the ratio between the largest and 2nd-to-largest (in abs)
eigenvalue of sqrt(dO/dJ^T V_J dO/dJ)
(if this is big, a 1D stream will form)
INPUT:
isotropic= (False), if True, return the ratio assuming an isotropic action distribution (i.e., just of dO/dJ)
OUTPUT:
ratio between eigenvalues of |dO / dJ|
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
if isotropic:
sortedEig= sorted(numpy.fabs(self._dOdJpEig[0]))
return sortedEig[2]/sortedEig[1]
else:
return numpy.sqrt(self._sortedSigOEig)[2]\
/numpy.sqrt(self._sortedSigOEig)[1]
def estimateTdisrupt(self,deltaAngle):
"""
NAME:
estimateTdisrupt
PURPOSE:
estimate the time of disruption
INPUT:
deltaAngle- spread in angle since disruption
OUTPUT:
time in natural units
HISTORY:
2013-11-27 - Written - Bovy (IAS)
"""
return deltaAngle\
/numpy.sqrt(numpy.sum(self._dsigomeanProg**2.))
############################STREAM TRACK FUNCTIONS#############################
def plotTrack(self,d1='x',d2='z',interp=True,spread=0,simple=_USESIMPLE,
*args,**kwargs):
"""
NAME:
plotTrack
PURPOSE:
plot the stream track
INPUT:
d1= plot this on the X axis ('x','y','z','R','phi','vx','vy','vz','vR','vt','ll','bb','dist','pmll','pmbb','vlos')
d2= plot this on the Y axis (same list as for d1)
interp= (True) if True, use the interpolated stream track
spread= (0) if int > 0, also plot the spread around the track as spread x sigma
scaleToPhysical= (False), if True, plot positions in kpc and velocities in km/s
           simple= (True), if True, use a simple estimate for the spread in perpendicular angle
bovy_plot.bovy_plot args and kwargs
OUTPUT:
plot to output device
HISTORY:
2013-12-09 - Written - Bovy (IAS)
"""
if not hasattr(self,'_ObsTrackLB') and \
(d1.lower() == 'll' or d1.lower() == 'bb'
or d1.lower() == 'dist' or d1.lower() == 'pmll'
or d1.lower() == 'pmbb' or d1.lower() == 'vlos'
or d2.lower() == 'll' or d2.lower() == 'bb'
or d2.lower() == 'dist' or d2.lower() == 'pmll'
or d2.lower() == 'pmbb' or d2.lower() == 'vlos'):
self.calc_stream_lb()
phys= kwargs.pop('scaleToPhysical',False)
tx= self._parse_track_dim(d1,interp=interp,phys=phys)
ty= self._parse_track_dim(d2,interp=interp,phys=phys)
bovy_plot.bovy_plot(tx,ty,*args,
xlabel=_labelDict[d1.lower()],
ylabel=_labelDict[d2.lower()],
**kwargs)
if spread:
addx, addy= self._parse_track_spread(d1,d2,interp=interp,phys=phys,
simple=simple)
if ('ls' in kwargs and kwargs['ls'] == 'none') \
or ('linestyle' in kwargs \
and kwargs['linestyle'] == 'none'):
kwargs.pop('ls',None)
kwargs.pop('linestyle',None)
spreadls= 'none'
else:
spreadls= '-.'
spreadmarker= kwargs.pop('marker',None)
spreadcolor= kwargs.pop('color',None)
spreadlw= kwargs.pop('lw',1.)
bovy_plot.bovy_plot(tx+spread*addx,ty+spread*addy,ls=spreadls,
marker=spreadmarker,color=spreadcolor,
lw=spreadlw,
overplot=True)
bovy_plot.bovy_plot(tx-spread*addx,ty-spread*addy,ls=spreadls,
marker=spreadmarker,color=spreadcolor,
lw=spreadlw,
overplot=True)
return None
def plotProgenitor(self,d1='x',d2='z',*args,**kwargs):
"""
NAME:
plotProgenitor
PURPOSE:
plot the progenitor orbit
INPUT:
d1= plot this on the X axis ('x','y','z','R','phi','vx','vy','vz','vR','vt','ll','bb','dist','pmll','pmbb','vlos')
d2= plot this on the Y axis (same list as for d1)
scaleToPhysical= (False), if True, plot positions in kpc and velocities in km/s
bovy_plot.bovy_plot args and kwargs
OUTPUT:
plot to output device
HISTORY:
2013-12-09 - Written - Bovy (IAS)
"""
tts= self._progenitor._orb.t[self._progenitor._orb.t \
< self._trackts[self._nTrackChunks-1]]
obs= [self._R0,0.,self._Zsun]
obs.extend(self._vsun)
phys= kwargs.pop('scaleToPhysical',False)
tx= self._parse_progenitor_dim(d1,tts,ro=self._Rnorm,vo=self._Vnorm,
obs=obs,phys=phys)
ty= self._parse_progenitor_dim(d2,tts,ro=self._Rnorm,vo=self._Vnorm,
obs=obs,phys=phys)
bovy_plot.bovy_plot(tx,ty,*args,
xlabel=_labelDict[d1.lower()],
ylabel=_labelDict[d2.lower()],
**kwargs)
return None
def _parse_track_dim(self,d1,interp=True,phys=False):
"""Parse the dimension to plot the stream track for"""
if interp: interpStr= 'interpolated'
else: interpStr= ''
if d1.lower() == 'x':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,0]
elif d1.lower() == 'y':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,1]
elif d1.lower() == 'z':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,2]
elif d1.lower() == 'r':
tx= self.__dict__['_%sObsTrack' % interpStr][:,0]
elif d1.lower() == 'phi':
tx= self.__dict__['_%sObsTrack' % interpStr][:,5]
elif d1.lower() == 'vx':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,3]
elif d1.lower() == 'vy':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,4]
elif d1.lower() == 'vz':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,5]
elif d1.lower() == 'vr':
tx= self.__dict__['_%sObsTrack' % interpStr][:,1]
elif d1.lower() == 'vt':
tx= self.__dict__['_%sObsTrack' % interpStr][:,2]
elif d1.lower() == 'll':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,0]
elif d1.lower() == 'bb':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,1]
elif d1.lower() == 'dist':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,2]
elif d1.lower() == 'pmll':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,4]
elif d1.lower() == 'pmbb':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,5]
elif d1.lower() == 'vlos':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,3]
if phys and (d1.lower() == 'x' or d1.lower() == 'y' \
or d1.lower() == 'z' or d1.lower() == 'r'):
tx= copy.copy(tx)
tx*= self._Rnorm
if phys and (d1.lower() == 'vx' or d1.lower() == 'vy' \
or d1.lower() == 'vz' or d1.lower() == 'vr' \
or d1.lower() == 'vt'):
tx= copy.copy(tx)
tx*= self._Vnorm
return tx
def _parse_progenitor_dim(self,d1,ts,ro=None,vo=None,obs=None,
phys=False):
"""Parse the dimension to plot the progenitor orbit for"""
if d1.lower() == 'x':
tx= self._progenitor.x(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'y':
tx= self._progenitor.y(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'z':
tx= self._progenitor.z(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'r':
tx= self._progenitor.R(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'phi':
tx= self._progenitor.phi(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'vx':
tx= self._progenitor.vx(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vy':
tx= self._progenitor.vy(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vz':
tx= self._progenitor.vz(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vr':
tx= self._progenitor.vR(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vt':
tx= self._progenitor.vT(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'll':
tx= self._progenitor.ll(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'bb':
tx= self._progenitor.bb(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'dist':
tx= self._progenitor.dist(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'pmll':
tx= self._progenitor.pmll(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'pmbb':
tx= self._progenitor.pmbb(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'vlos':
tx= self._progenitor.vlos(ts,ro=ro,vo=vo,obs=obs)
if phys and (d1.lower() == 'x' or d1.lower() == 'y' \
or d1.lower() == 'z' or d1.lower() == 'r'):
tx= copy.copy(tx)
tx*= self._Rnorm
if phys and (d1.lower() == 'vx' or d1.lower() == 'vy' \
or d1.lower() == 'vz' or d1.lower() == 'vr' \
or d1.lower() == 'vt'):
tx= copy.copy(tx)
tx*= self._Vnorm
return tx
def _parse_track_spread(self,d1,d2,interp=True,phys=False,
simple=_USESIMPLE):
"""Determine the spread around the track"""
if not hasattr(self,'_allErrCovs'):
self._determine_stream_spread(simple=simple)
okaySpreadR= ['r','vr','vt','z','vz','phi']
okaySpreadXY= ['x','y','z','vx','vy','vz']
okaySpreadLB= ['ll','bb','dist','vlos','pmll','pmbb']
#Determine which coordinate system we're in
coord= [False,False,False] #R, XY, LB
if d1.lower() in okaySpreadR and d2.lower() in okaySpreadR:
coord[0]= True
elif d1.lower() in okaySpreadXY and d2.lower() in okaySpreadXY:
coord[1]= True
elif d1.lower() in okaySpreadLB and d2.lower() in okaySpreadLB:
coord[2]= True
else:
raise NotImplementedError("plotting the spread for coordinates from different systems not implemented yet ...")
#Get the right 2D Jacobian
indxDict= {}
indxDict['r']= 0
indxDict['vr']= 1
indxDict['vt']= 2
indxDict['z']= 3
indxDict['vz']= 4
indxDict['phi']= 5
indxDictXY= {}
indxDictXY['x']= 0
indxDictXY['y']= 1
indxDictXY['z']= 2
indxDictXY['vx']= 3
indxDictXY['vy']= 4
indxDictXY['vz']= 5
indxDictLB= {}
indxDictLB['ll']= 0
indxDictLB['bb']= 1
indxDictLB['dist']= 2
indxDictLB['vlos']= 3
indxDictLB['pmll']= 4
indxDictLB['pmbb']= 5
if coord[0]:
relevantCov= self._allErrCovs
relevantDict= indxDict
if phys:#apply scale factors
tcov= copy.copy(relevantCov)
scaleFac= numpy.array([self._Rnorm,self._Vnorm,self._Vnorm,
self._Rnorm,self._Vnorm,1.])
tcov*= numpy.tile(scaleFac,(6,1))
tcov*= numpy.tile(scaleFac,(6,1)).T
relevantCov= tcov
elif coord[1]:
relevantCov= self._allErrCovsXY
relevantDict= indxDictXY
if phys:#apply scale factors
tcov= copy.copy(relevantCov)
scaleFac= numpy.array([self._Rnorm,self._Rnorm,self._Rnorm,
self._Vnorm,self._Vnorm,self._Vnorm])
tcov*= numpy.tile(scaleFac,(6,1))
tcov*= numpy.tile(scaleFac,(6,1)).T
relevantCov= tcov
elif coord[2]:
relevantCov= self._allErrCovsLBUnscaled
relevantDict= indxDictLB
indx0= numpy.array([[relevantDict[d1.lower()],relevantDict[d1.lower()]],
[relevantDict[d2.lower()],relevantDict[d2.lower()]]])
indx1= numpy.array([[relevantDict[d1.lower()],relevantDict[d2.lower()]],
[relevantDict[d1.lower()],relevantDict[d2.lower()]]])
cov= relevantCov[:,indx0,indx1] #cov contains all nTrackChunks covs
if not interp:
out= numpy.empty((self._nTrackChunks,2))
eigDir= numpy.array([1.,0.])
for ii in range(self._nTrackChunks):
covEig= numpy.linalg.eig(cov[ii])
minIndx= numpy.argmin(covEig[0])
minEigvec= covEig[1][:,minIndx] #this is the direction of the transverse spread
if numpy.sum(minEigvec*eigDir) < 0.: minEigvec*= -1. #Keep them pointing in the same direction
out[ii]= minEigvec*numpy.sqrt(covEig[0][minIndx])
eigDir= minEigvec
else:
#We slerp the minor eigenvector and interpolate the eigenvalue
#First store all of the eigenvectors on the track
allEigval= numpy.empty(self._nTrackChunks)
allEigvec= numpy.empty((self._nTrackChunks,2))
eigDir= numpy.array([1.,0.])
for ii in range(self._nTrackChunks):
covEig= numpy.linalg.eig(cov[ii])
minIndx= numpy.argmin(covEig[0])
minEigvec= covEig[1][:,minIndx] #this is the direction of the transverse spread
if numpy.sum(minEigvec*eigDir) < 0.: minEigvec*= -1. #Keep them pointing in the same direction
allEigval[ii]= numpy.sqrt(covEig[0][minIndx])
allEigvec[ii]= minEigvec
eigDir= minEigvec
#Now interpolate where needed
interpEigval=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
allEigval,k=3)
interpolatedEigval= interpEigval(self._interpolatedThetasTrack)
#Interpolate in chunks
interpolatedEigvec= numpy.empty((len(self._interpolatedThetasTrack),
2))
for ii in range(self._nTrackChunks-1):
slerpOmega= numpy.arccos(numpy.sum(allEigvec[ii]*allEigvec[ii+1]))
slerpts= (self._interpolatedThetasTrack-self._thetasTrack[ii])/\
(self._thetasTrack[ii+1]-self._thetasTrack[ii])
slerpIndx= (slerpts >= 0.)*(slerpts <= 1.)
for jj in range(2):
interpolatedEigvec[slerpIndx,jj]=\
(numpy.sin((1-slerpts[slerpIndx])*slerpOmega)*allEigvec[ii,jj]
+numpy.sin(slerpts[slerpIndx]*slerpOmega)*allEigvec[ii+1,jj])/numpy.sin(slerpOmega)
out= numpy.tile(interpolatedEigval.T,(2,1)).T*interpolatedEigvec
if coord[2]: #if LB, undo rescalings that were applied before
out[:,0]*= self._ErrCovsLBScale[relevantDict[d1.lower()]]
out[:,1]*= self._ErrCovsLBScale[relevantDict[d2.lower()]]
return (out[:,0],out[:,1])
def plotCompareTrackAAModel(self,**kwargs):
"""
NAME:
plotCompareTrackAAModel
PURPOSE:
plot the comparison between the underlying model's dOmega_perp vs. dangle_r (line) and the track in (x,v)'s dOmega_perp vs. dangle_r (dots; explicitly calculating the track's action-angle coordinates)
INPUT:
bovy_plot.bovy_plot kwargs
OUTPUT:
plot
HISTORY:
2014-08-27 - Written - Bovy (IAS)
"""
#First calculate the model
model_adiff= (self._ObsTrackAA[:,3:]-self._progenitor_angle)[:,0]\
*self._sigMeanSign
model_operp= numpy.dot(self._ObsTrackAA[:,:3]-self._progenitor_Omega,
self._dsigomeanProgDirection)\
*self._sigMeanSign
#Then calculate the track's frequency-angle coordinates
if self._multi is None:
aatrack= numpy.empty((self._nTrackChunks,6))
for ii in range(self._nTrackChunks):
aatrack[ii]= self._aA.actionsFreqsAngles(Orbit(self._ObsTrack[ii,:]),
maxn=3)[3:]
else:
aatrack= numpy.reshape(\
multi.parallel_map(
(lambda x: self._aA.actionsFreqsAngles(Orbit(self._ObsTrack[x,:]), maxn=3)[3:]),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi])),(self._nTrackChunks,6))
track_adiff= (aatrack[:,3:]-self._progenitor_angle)[:,0]\
*self._sigMeanSign
track_operp= numpy.dot(aatrack[:,:3]-self._progenitor_Omega,
self._dsigomeanProgDirection)\
*self._sigMeanSign
overplot= kwargs.pop('overplot',False)
yrange= kwargs.pop('yrange',
[0.,numpy.amax(numpy.hstack((model_operp,track_operp)))*1.1])
xlabel= kwargs.pop('xlabel',r'$\Delta \theta_R$')
ylabel= kwargs.pop('ylabel',r'$\Delta \Omega_\parallel$')
bovy_plot.bovy_plot(model_adiff,model_operp,'k-',overplot=overplot,
xlabel=xlabel,ylabel=ylabel,yrange=yrange,**kwargs)
bovy_plot.bovy_plot(track_adiff,track_operp,'ko',overplot=True,
**kwargs)
return None
def _determine_nTrackIterations(self,nTrackIterations):
"""Determine a good value for nTrackIterations based on the misalignment between stream and orbit; just based on some rough experience for now"""
if not nTrackIterations is None:
self.nTrackIterations= nTrackIterations
return None
if numpy.fabs(self.misalignment()) < 1.:
self.nTrackIterations= 0
elif numpy.fabs(self.misalignment()) >= 1. \
and numpy.fabs(self.misalignment()) < 3.:
self.nTrackIterations= 1
elif numpy.fabs(self.misalignment()) >= 3.:
self.nTrackIterations= 2
return None
def _determine_stream_track(self,nTrackChunks):
"""Determine the track of the stream in real space"""
#Determine how much orbital time is necessary for the progenitor's orbit to cover the stream
if nTrackChunks is None:
#default is floor(self._deltaAngleTrack/0.15)+1
self._nTrackChunks= int(numpy.floor(self._deltaAngleTrack/0.15))+1
else:
self._nTrackChunks= nTrackChunks
dt= self._deltaAngleTrack\
/self._progenitor_Omega_along_dOmega
self._trackts= numpy.linspace(0.,2*dt,2*self._nTrackChunks-1) #to be sure that we cover it
#Instantiate an auxiliaryTrack, which is an Orbit instance at the mean frequency of the stream, and zero angle separation wrt the progenitor; prog_stream_offset is the offset between this track and the progenitor at zero angle
prog_stream_offset=\
_determine_stream_track_single(self._aA,
self._progenitor,
0., #time = 0
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
self.meanOmega,
0.) #angle = 0
auxiliaryTrack= Orbit(prog_stream_offset[3])
if dt < 0.:
            self._trackts= numpy.linspace(0.,-2.*dt,2*self._nTrackChunks-1)
#Flip velocities before integrating
auxiliaryTrack= auxiliaryTrack.flip()
auxiliaryTrack.integrate(self._trackts,self._pot)
if dt < 0.:
#Flip velocities again
auxiliaryTrack._orb.orbit[:,1]= -auxiliaryTrack._orb.orbit[:,1]
auxiliaryTrack._orb.orbit[:,2]= -auxiliaryTrack._orb.orbit[:,2]
auxiliaryTrack._orb.orbit[:,4]= -auxiliaryTrack._orb.orbit[:,4]
#Calculate the actions, frequencies, and angle for this auxiliary orbit
acfs= self._aA.actionsFreqs(auxiliaryTrack(0.),maxn=3)
        auxiliary_Omega= numpy.array([acfs[3],acfs[4],acfs[5]]).reshape(3)
auxiliary_Omega_along_dOmega= \
numpy.dot(auxiliary_Omega,self._dsigomeanProgDirection)
#Now calculate the actions, frequencies, and angles + Jacobian for each chunk
allAcfsTrack= numpy.empty((self._nTrackChunks,9))
alljacsTrack= numpy.empty((self._nTrackChunks,6,6))
allinvjacsTrack= numpy.empty((self._nTrackChunks,6,6))
thetasTrack= numpy.linspace(0.,self._deltaAngleTrack,
self._nTrackChunks)
ObsTrack= numpy.empty((self._nTrackChunks,6))
ObsTrackAA= numpy.empty((self._nTrackChunks,6))
detdOdJps= numpy.empty((self._nTrackChunks))
if self._multi is None:
for ii in range(self._nTrackChunks):
multiOut= _determine_stream_track_single(self._aA,
auxiliaryTrack,
self._trackts[ii]*numpy.fabs(self._progenitor_Omega_along_dOmega/auxiliary_Omega_along_dOmega), #this factor accounts for the difference in frequency between the progenitor and the auxiliary track
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
self.meanOmega,
thetasTrack[ii])
allAcfsTrack[ii,:]= multiOut[0]
alljacsTrack[ii,:,:]= multiOut[1]
allinvjacsTrack[ii,:,:]= multiOut[2]
ObsTrack[ii,:]= multiOut[3]
ObsTrackAA[ii,:]= multiOut[4]
detdOdJps[ii]= multiOut[5]
else:
multiOut= multi.parallel_map(\
(lambda x: _determine_stream_track_single(self._aA,auxiliaryTrack,
self._trackts[x]*numpy.fabs(self._progenitor_Omega_along_dOmega/auxiliary_Omega_along_dOmega),
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
self.meanOmega,
thetasTrack[x])),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi]))
for ii in range(self._nTrackChunks):
allAcfsTrack[ii,:]= multiOut[ii][0]
alljacsTrack[ii,:,:]= multiOut[ii][1]
allinvjacsTrack[ii,:,:]= multiOut[ii][2]
ObsTrack[ii,:]= multiOut[ii][3]
ObsTrackAA[ii,:]= multiOut[ii][4]
detdOdJps[ii]= multiOut[ii][5]
#Repeat the track calculation using the previous track, to get closer to it
for nn in range(self.nTrackIterations):
if self._multi is None:
for ii in range(self._nTrackChunks):
multiOut= _determine_stream_track_single(self._aA,
Orbit(ObsTrack[ii,:]),
0.,
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
self.meanOmega,
thetasTrack[ii])
allAcfsTrack[ii,:]= multiOut[0]
alljacsTrack[ii,:,:]= multiOut[1]
allinvjacsTrack[ii,:,:]= multiOut[2]
ObsTrack[ii,:]= multiOut[3]
ObsTrackAA[ii,:]= multiOut[4]
detdOdJps[ii]= multiOut[5]
else:
multiOut= multi.parallel_map(\
(lambda x: _determine_stream_track_single(self._aA,Orbit(ObsTrack[x,:]),0.,
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
self.meanOmega,
thetasTrack[x])),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi]))
for ii in range(self._nTrackChunks):
allAcfsTrack[ii,:]= multiOut[ii][0]
alljacsTrack[ii,:,:]= multiOut[ii][1]
allinvjacsTrack[ii,:,:]= multiOut[ii][2]
ObsTrack[ii,:]= multiOut[ii][3]
ObsTrackAA[ii,:]= multiOut[ii][4]
detdOdJps[ii]= multiOut[ii][5]
#Store the track
self._thetasTrack= thetasTrack
self._ObsTrack= ObsTrack
self._ObsTrackAA= ObsTrackAA
self._allAcfsTrack= allAcfsTrack
self._alljacsTrack= alljacsTrack
self._allinvjacsTrack= allinvjacsTrack
self._detdOdJps= detdOdJps
self._meandetdOdJp= numpy.mean(self._detdOdJps)
self._logmeandetdOdJp= numpy.log(self._meandetdOdJp)
#Also calculate _ObsTrackXY in XYZ,vXYZ coordinates
self._ObsTrackXY= numpy.empty_like(self._ObsTrack)
TrackX= self._ObsTrack[:,0]*numpy.cos(self._ObsTrack[:,5])
TrackY= self._ObsTrack[:,0]*numpy.sin(self._ObsTrack[:,5])
TrackZ= self._ObsTrack[:,3]
TrackvX, TrackvY, TrackvZ=\
bovy_coords.cyl_to_rect_vec(self._ObsTrack[:,1],
self._ObsTrack[:,2],
self._ObsTrack[:,4],
self._ObsTrack[:,5])
self._ObsTrackXY[:,0]= TrackX
self._ObsTrackXY[:,1]= TrackY
self._ObsTrackXY[:,2]= TrackZ
self._ObsTrackXY[:,3]= TrackvX
self._ObsTrackXY[:,4]= TrackvY
self._ObsTrackXY[:,5]= TrackvZ
return None
def _determine_stream_spread(self,simple=_USESIMPLE):
"""Determine the spread around the stream track, just sets matrices that describe the covariances"""
allErrCovs= numpy.empty((self._nTrackChunks,6,6))
if self._multi is None:
for ii in range(self._nTrackChunks):
allErrCovs[ii]= _determine_stream_spread_single(self._sigomatrixEig,
self._thetasTrack[ii],
self.sigOmega,
lambda y: self.sigangledAngle(y,simple=simple),
self._allinvjacsTrack[ii])
else:
multiOut= multi.parallel_map(\
(lambda x: _determine_stream_spread_single(self._sigomatrixEig,
self._thetasTrack[x],
self.sigOmega,
lambda y: self.sigangledAngle(y,simple=simple),
self._allinvjacsTrack[x])),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi]))
for ii in range(self._nTrackChunks):
allErrCovs[ii]= multiOut[ii]
self._allErrCovs= allErrCovs
#Also propagate to XYZ coordinates
allErrCovsXY= numpy.empty_like(self._allErrCovs)
allErrCovsEigvalXY= numpy.empty((len(self._thetasTrack),6))
allErrCovsEigvecXY= numpy.empty_like(self._allErrCovs)
eigDir= numpy.array([numpy.array([1.,0.,0.,0.,0.,0.]) for ii in range(6)])
for ii in range(self._nTrackChunks):
tjac= bovy_coords.cyl_to_rect_jac(*self._ObsTrack[ii])
allErrCovsXY[ii]=\
numpy.dot(tjac,numpy.dot(self._allErrCovs[ii],tjac.T))
#Eigen decomposition for interpolation
teig= numpy.linalg.eig(allErrCovsXY[ii])
#Sort them to match them up later
sortIndx= numpy.argsort(teig[0])
allErrCovsEigvalXY[ii]= teig[0][sortIndx]
#Make sure the eigenvectors point in the same direction
for jj in range(6):
if numpy.sum(eigDir[jj]*teig[1][:,sortIndx[jj]]) < 0.:
teig[1][:,sortIndx[jj]]*= -1.
eigDir[jj]= teig[1][:,sortIndx[jj]]
allErrCovsEigvecXY[ii]= teig[1][:,sortIndx]
self._allErrCovsXY= allErrCovsXY
#Interpolate the allErrCovsXY covariance matrices along the interpolated track
#Interpolate the eigenvalues
interpAllErrCovsEigvalXY=\
[interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
allErrCovsEigvalXY[:,ii],
k=3) for ii in range(6)]
#Now build the interpolated allErrCovsXY using slerp
interpolatedAllErrCovsXY= numpy.empty((len(self._interpolatedThetasTrack),
6,6))
interpolatedEigval=\
numpy.array([interpAllErrCovsEigvalXY[ii](self._interpolatedThetasTrack) for ii in range(6)]) #6,ninterp
#Interpolate in chunks
interpolatedEigvec= numpy.empty((len(self._interpolatedThetasTrack),
6,6))
for ii in range(self._nTrackChunks-1):
slerpOmegas=\
[numpy.arccos(numpy.sum(allErrCovsEigvecXY[ii,:,jj]*allErrCovsEigvecXY[ii+1,:,jj])) for jj in range(6)]
slerpts= (self._interpolatedThetasTrack-self._thetasTrack[ii])/\
(self._thetasTrack[ii+1]-self._thetasTrack[ii])
slerpIndx= (slerpts >= 0.)*(slerpts <= 1.)
for jj in range(6):
for kk in range(6):
interpolatedEigvec[slerpIndx,kk,jj]=\
(numpy.sin((1-slerpts[slerpIndx])*slerpOmegas[jj])*allErrCovsEigvecXY[ii,kk,jj]
+numpy.sin(slerpts[slerpIndx]*slerpOmegas[jj])*allErrCovsEigvecXY[ii+1,kk,jj])/numpy.sin(slerpOmegas[jj])
for ii in range(len(self._interpolatedThetasTrack)):
interpolatedAllErrCovsXY[ii]=\
numpy.dot(interpolatedEigvec[ii],
numpy.dot(numpy.diag(interpolatedEigval[:,ii]),
interpolatedEigvec[ii].T))
self._interpolatedAllErrCovsXY= interpolatedAllErrCovsXY
#Also interpolate in l and b coordinates
self._determine_stream_spreadLB(simple=simple)
return None
def _determine_stream_spreadLB(self,simple=_USESIMPLE,
Rnorm=None,Vnorm=None,
R0=None,Zsun=None,vsun=None):
"""Determine the spread in the stream in observable coordinates"""
if not hasattr(self,'_allErrCovs'):
self._determine_stream_spread(simple=simple)
if Rnorm is None:
Rnorm= self._Rnorm
if Vnorm is None:
Vnorm= self._Vnorm
if R0 is None:
R0= self._R0
if Zsun is None:
Zsun= self._Zsun
if vsun is None:
vsun= self._vsun
allErrCovsLB= numpy.empty_like(self._allErrCovs)
obs= [R0,0.,Zsun]
obs.extend(vsun)
obskwargs= {}
obskwargs['ro']= Rnorm
obskwargs['vo']= Vnorm
obskwargs['obs']= obs
self._ErrCovsLBScale= [180.,90.,
self._progenitor.dist(**obskwargs),
numpy.fabs(self._progenitor.vlos(**obskwargs)),
numpy.sqrt(self._progenitor.pmll(**obskwargs)**2.
+self._progenitor.pmbb(**obskwargs)**2.),
numpy.sqrt(self._progenitor.pmll(**obskwargs)**2.
+self._progenitor.pmbb(**obskwargs)**2.)]
allErrCovsEigvalLB= numpy.empty((len(self._thetasTrack),6))
allErrCovsEigvecLB= numpy.empty_like(self._allErrCovs)
eigDir= numpy.array([numpy.array([1.,0.,0.,0.,0.,0.]) for ii in range(6)])
for ii in range(self._nTrackChunks):
tjacXY= bovy_coords.galcenrect_to_XYZ_jac(*self._ObsTrackXY[ii])
tjacLB= bovy_coords.lbd_to_XYZ_jac(*self._ObsTrackLB[ii],
degree=True)
tjacLB[:3,:]/= Rnorm
tjacLB[3:,:]/= Vnorm
for jj in range(6):
tjacLB[:,jj]*= self._ErrCovsLBScale[jj]
tjac= numpy.dot(numpy.linalg.inv(tjacLB),tjacXY)
allErrCovsLB[ii]=\
numpy.dot(tjac,numpy.dot(self._allErrCovsXY[ii],tjac.T))
#Eigen decomposition for interpolation
teig= numpy.linalg.eig(allErrCovsLB[ii])
#Sort them to match them up later
sortIndx= numpy.argsort(teig[0])
allErrCovsEigvalLB[ii]= teig[0][sortIndx]
#Make sure the eigenvectors point in the same direction
for jj in range(6):
if numpy.sum(eigDir[jj]*teig[1][:,sortIndx[jj]]) < 0.:
teig[1][:,sortIndx[jj]]*= -1.
eigDir[jj]= teig[1][:,sortIndx[jj]]
allErrCovsEigvecLB[ii]= teig[1][:,sortIndx]
self._allErrCovsLBUnscaled= allErrCovsLB
#Interpolate the allErrCovsLB covariance matrices along the interpolated track
#Interpolate the eigenvalues
interpAllErrCovsEigvalLB=\
[interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
allErrCovsEigvalLB[:,ii],
k=3) for ii in range(6)]
#Now build the interpolated allErrCovsXY using slerp
interpolatedAllErrCovsLB= numpy.empty((len(self._interpolatedThetasTrack),
6,6))
interpolatedEigval=\
numpy.array([interpAllErrCovsEigvalLB[ii](self._interpolatedThetasTrack) for ii in range(6)]) #6,ninterp
#Interpolate in chunks
interpolatedEigvec= numpy.empty((len(self._interpolatedThetasTrack),
6,6))
for ii in range(self._nTrackChunks-1):
slerpOmegas=\
[numpy.arccos(numpy.sum(allErrCovsEigvecLB[ii,:,jj]*allErrCovsEigvecLB[ii+1,:,jj])) for jj in range(6)]
slerpts= (self._interpolatedThetasTrack-self._thetasTrack[ii])/\
(self._thetasTrack[ii+1]-self._thetasTrack[ii])
slerpIndx= (slerpts >= 0.)*(slerpts <= 1.)
for jj in range(6):
for kk in range(6):
interpolatedEigvec[slerpIndx,kk,jj]=\
(numpy.sin((1-slerpts[slerpIndx])*slerpOmegas[jj])*allErrCovsEigvecLB[ii,kk,jj]
+numpy.sin(slerpts[slerpIndx]*slerpOmegas[jj])*allErrCovsEigvecLB[ii+1,kk,jj])/numpy.sin(slerpOmegas[jj])
for ii in range(len(self._interpolatedThetasTrack)):
interpolatedAllErrCovsLB[ii]=\
numpy.dot(interpolatedEigvec[ii],
numpy.dot(numpy.diag(interpolatedEigval[:,ii]),
interpolatedEigvec[ii].T))
self._interpolatedAllErrCovsLBUnscaled= interpolatedAllErrCovsLB
#Also calculate the (l,b,..) -> (X,Y,..) Jacobian at all of the interpolated and not interpolated points
trackLogDetJacLB= numpy.empty_like(self._thetasTrack)
interpolatedTrackLogDetJacLB=\
numpy.empty_like(self._interpolatedThetasTrack)
for ii in range(self._nTrackChunks):
tjacLB= bovy_coords.lbd_to_XYZ_jac(*self._ObsTrackLB[ii],
degree=True)
trackLogDetJacLB[ii]= numpy.log(numpy.linalg.det(tjacLB))
self._trackLogDetJacLB= trackLogDetJacLB
for ii in range(len(self._interpolatedThetasTrack)):
tjacLB=\
bovy_coords.lbd_to_XYZ_jac(*self._interpolatedObsTrackLB[ii],
degree=True)
interpolatedTrackLogDetJacLB[ii]=\
numpy.log(numpy.linalg.det(tjacLB))
self._interpolatedTrackLogDetJacLB= interpolatedTrackLogDetJacLB
return None
def _interpolate_stream_track(self):
"""Build interpolations of the stream track"""
if hasattr(self,'_interpolatedThetasTrack'):
return None #Already did this
TrackX= self._ObsTrack[:,0]*numpy.cos(self._ObsTrack[:,5])
TrackY= self._ObsTrack[:,0]*numpy.sin(self._ObsTrack[:,5])
TrackZ= self._ObsTrack[:,3]
TrackvX, TrackvY, TrackvZ=\
bovy_coords.cyl_to_rect_vec(self._ObsTrack[:,1],
self._ObsTrack[:,2],
self._ObsTrack[:,4],
self._ObsTrack[:,5])
#Interpolate
self._interpTrackX=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackX,k=3)
self._interpTrackY=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackY,k=3)
self._interpTrackZ=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackZ,k=3)
self._interpTrackvX=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackvX,k=3)
self._interpTrackvY=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackvY,k=3)
self._interpTrackvZ=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackvZ,k=3)
#Now store an interpolated version of the stream track
self._interpolatedThetasTrack=\
numpy.linspace(0.,self._deltaAngleTrack,1001)
self._interpolatedObsTrackXY= numpy.empty((len(self._interpolatedThetasTrack),6))
self._interpolatedObsTrackXY[:,0]=\
self._interpTrackX(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,1]=\
self._interpTrackY(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,2]=\
self._interpTrackZ(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,3]=\
self._interpTrackvX(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,4]=\
self._interpTrackvY(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,5]=\
self._interpTrackvZ(self._interpolatedThetasTrack)
#Also in cylindrical coordinates
self._interpolatedObsTrack= \
numpy.empty((len(self._interpolatedThetasTrack),6))
tR,tphi,tZ= bovy_coords.rect_to_cyl(self._interpolatedObsTrackXY[:,0],
self._interpolatedObsTrackXY[:,1],
self._interpolatedObsTrackXY[:,2])
tvR,tvT,tvZ=\
bovy_coords.rect_to_cyl_vec(self._interpolatedObsTrackXY[:,3],
self._interpolatedObsTrackXY[:,4],
self._interpolatedObsTrackXY[:,5],
tR,tphi,tZ,cyl=True)
self._interpolatedObsTrack[:,0]= tR
self._interpolatedObsTrack[:,1]= tvR
self._interpolatedObsTrack[:,2]= tvT
self._interpolatedObsTrack[:,3]= tZ
self._interpolatedObsTrack[:,4]= tvZ
self._interpolatedObsTrack[:,5]= tphi
return None
def _interpolate_stream_track_aA(self):
"""Build interpolations of the stream track in action-angle coordinates"""
if hasattr(self,'_interpolatedObsTrackAA'):
return None #Already did this
#Calculate 1D meanOmega on a fine grid in angle and interpolate
if not hasattr(self,'_interpolatedThetasTrack'):
self._interpolate_stream_track()
dmOs= numpy.array([self.meanOmega(da,oned=True)
for da in self._interpolatedThetasTrack])
self._interpTrackAAdmeanOmegaOneD=\
interpolate.InterpolatedUnivariateSpline(\
self._interpolatedThetasTrack,dmOs,k=3)
#Build the interpolated AA
self._interpolatedObsTrackAA=\
numpy.empty((len(self._interpolatedThetasTrack),6))
for ii in range(len(self._interpolatedThetasTrack)):
self._interpolatedObsTrackAA[ii,:3]=\
self._progenitor_Omega+dmOs[ii]*self._dsigomeanProgDirection\
*self._sigMeanSign
self._interpolatedObsTrackAA[ii,3:]=\
self._progenitor_angle+self._interpolatedThetasTrack[ii]\
*self._dsigomeanProgDirection*self._sigMeanSign
self._interpolatedObsTrackAA[ii,3:]=\
numpy.mod(self._interpolatedObsTrackAA[ii,3:],2.*numpy.pi)
return None
def calc_stream_lb(self,
Vnorm=None,Rnorm=None,
R0=None,Zsun=None,vsun=None):
"""
NAME:
calc_stream_lb
PURPOSE:
convert the stream track to observational coordinates and store
INPUT:
Coordinate transformation inputs (all default to the instance-wide
values):
Vnorm= circular velocity to normalize velocities with
Rnorm= Galactocentric radius to normalize positions with
R0= Galactocentric radius of the Sun (kpc)
Zsun= Sun's height above the plane (kpc)
vsun= Sun's motion in cylindrical coordinates (vR positive away from center)
OUTPUT:
(none)
HISTORY:
2013-12-02 - Written - Bovy (IAS)
"""
if Vnorm is None:
Vnorm= self._Vnorm
if Rnorm is None:
Rnorm= self._Rnorm
if R0 is None:
R0= self._R0
if Zsun is None:
Zsun= self._Zsun
if vsun is None:
vsun= self._vsun
self._ObsTrackLB= numpy.empty_like(self._ObsTrack)
XYZ= bovy_coords.galcencyl_to_XYZ(self._ObsTrack[:,0]*Rnorm,
self._ObsTrack[:,5],
self._ObsTrack[:,3]*Rnorm,
Xsun=R0,Zsun=Zsun)
vXYZ= bovy_coords.galcencyl_to_vxvyvz(self._ObsTrack[:,1]*Vnorm,
self._ObsTrack[:,2]*Vnorm,
self._ObsTrack[:,4]*Vnorm,
self._ObsTrack[:,5],
vsun=vsun)
slbd=bovy_coords.XYZ_to_lbd(XYZ[0],XYZ[1],XYZ[2],
degree=True)
svlbd= bovy_coords.vxvyvz_to_vrpmllpmbb(vXYZ[0],vXYZ[1],vXYZ[2],
slbd[:,0],slbd[:,1],slbd[:,2],
degree=True)
self._ObsTrackLB[:,0]= slbd[:,0]
self._ObsTrackLB[:,1]= slbd[:,1]
self._ObsTrackLB[:,2]= slbd[:,2]
self._ObsTrackLB[:,3]= svlbd[:,0]
self._ObsTrackLB[:,4]= svlbd[:,1]
self._ObsTrackLB[:,5]= svlbd[:,2]
if hasattr(self,'_interpolatedObsTrackXY'):
#Do the same for the interpolated track
self._interpolatedObsTrackLB=\
numpy.empty_like(self._interpolatedObsTrackXY)
XYZ=\
bovy_coords.galcenrect_to_XYZ(\
self._interpolatedObsTrackXY[:,0]*Rnorm,
self._interpolatedObsTrackXY[:,1]*Rnorm,
self._interpolatedObsTrackXY[:,2]*Rnorm,
Xsun=R0,Zsun=Zsun)
vXYZ=\
bovy_coords.galcenrect_to_vxvyvz(\
self._interpolatedObsTrackXY[:,3]*Vnorm,
self._interpolatedObsTrackXY[:,4]*Vnorm,
self._interpolatedObsTrackXY[:,5]*Vnorm,
vsun=vsun)
slbd=bovy_coords.XYZ_to_lbd(XYZ[0],XYZ[1],XYZ[2],
degree=True)
svlbd= bovy_coords.vxvyvz_to_vrpmllpmbb(vXYZ[0],vXYZ[1],vXYZ[2],
slbd[:,0],slbd[:,1],
slbd[:,2],
degree=True)
self._interpolatedObsTrackLB[:,0]= slbd[:,0]
self._interpolatedObsTrackLB[:,1]= slbd[:,1]
self._interpolatedObsTrackLB[:,2]= slbd[:,2]
self._interpolatedObsTrackLB[:,3]= svlbd[:,0]
self._interpolatedObsTrackLB[:,4]= svlbd[:,1]
self._interpolatedObsTrackLB[:,5]= svlbd[:,2]
if hasattr(self,'_allErrCovsLBUnscaled'):
#Re-calculate this
self._determine_stream_spreadLB(simple=_USESIMPLE,
Vnorm=Vnorm,Rnorm=Rnorm,
R0=R0,Zsun=Zsun,vsun=vsun)
return None
def _find_closest_trackpoint(self,R,vR,vT,z,vz,phi,interp=True,xy=False,
usev=False):
"""For backward compatibility"""
return self.find_closest_trackpoint(R,vR,vT,z,vz,phi,
interp=interp,xy=xy,
usev=usev)
def find_closest_trackpoint(self,R,vR,vT,z,vz,phi,interp=True,xy=False,
usev=False):
"""
NAME:
find_closest_trackpoint
PURPOSE:
find the closest point on the stream track to a given point
INPUT:
R,vR,vT,z,vz,phi - phase-space coordinates of the given point
interp= (True), if True, return the index of the interpolated track
xy= (False) if True, input is X,Y,Z,vX,vY,vZ in Galactocentric rectangular coordinates; if xy, some coordinates may be missing (given as None) and they will not be used
usev= (False) if True, also use velocities to find the closest point
OUTPUT:
index into the track of the closest track point
HISTORY:
2013-12-04 - Written - Bovy (IAS)
"""
if xy:
X= R
Y= vR
Z= vT
else:
X= R*numpy.cos(phi)
Y= R*numpy.sin(phi)
Z= z
if xy and usev:
vX= z
vY= vz
vZ= phi
elif usev:
vX= vR*numpy.cos(phi)-vT*numpy.sin(phi)
vY= vR*numpy.sin(phi)+vT*numpy.cos(phi)
vZ= vz
present= [not X is None,not Y is None,not Z is None]
if usev: present.extend([not vX is None,not vY is None,not vZ is None])
present= numpy.array(present,dtype='float')
if X is None: X= 0.
if Y is None: Y= 0.
if Z is None: Z= 0.
if usev and vX is None: vX= 0.
if usev and vY is None: vY= 0.
if usev and vZ is None: vZ= 0.
if interp:
dist2= present[0]*(X-self._interpolatedObsTrackXY[:,0])**2.\
+present[1]*(Y-self._interpolatedObsTrackXY[:,1])**2.\
+present[2]*(Z-self._interpolatedObsTrackXY[:,2])**2.
if usev:
dist2+= present[3]*(vX-self._interpolatedObsTrackXY[:,3])**2.\
+present[4]*(vY-self._interpolatedObsTrackXY[:,4])**2.\
+present[5]*(vZ-self._interpolatedObsTrackXY[:,5])**2.
else:
dist2= present[0]*(X-self._ObsTrackXY[:,0])**2.\
+present[1]*(Y-self._ObsTrackXY[:,1])**2.\
+present[2]*(Z-self._ObsTrackXY[:,2])**2.
if usev:
dist2+= present[3]*(vX-self._ObsTrackXY[:,3])**2.\
+present[4]*(vY-self._ObsTrackXY[:,4])**2.\
+present[5]*(vZ-self._ObsTrackXY[:,5])**2.
return numpy.argmin(dist2)
def _find_closest_trackpointLB(self,l,b,D,vlos,pmll,pmbb,interp=True,
usev=False):
return self.find_closest_trackpointLB(l,b,D,vlos,pmll,pmbb,
interp=interp,
usev=usev)
def find_closest_trackpointLB(self,l,b,D,vlos,pmll,pmbb,interp=True,
usev=False):
"""
NAME:
find_closest_trackpointLB
PURPOSE:
find the closest point on the stream track to a given point in (l,b,...) coordinates
INPUT:
l,b,D,vlos,pmll,pmbb- coordinates in (deg,deg,kpc,km/s,mas/yr,mas/yr)
interp= (True) if True, return the closest index on the interpolated track
usev= (False) if True, also use the velocity components (default is to only use the positions)
OUTPUT:
index of closest track point on the interpolated or not-interpolated track
HISTORY:
2013-12-17- Written - Bovy (IAS)
"""
if interp:
nTrackPoints= len(self._interpolatedThetasTrack)
else:
nTrackPoints= len(self._thetasTrack)
if l is None:
l= 0.
trackL= numpy.zeros(nTrackPoints)
elif interp:
trackL= self._interpolatedObsTrackLB[:,0]
else:
trackL= self._ObsTrackLB[:,0]
if b is None:
b= 0.
trackB= numpy.zeros(nTrackPoints)
elif interp:
trackB= self._interpolatedObsTrackLB[:,1]
else:
trackB= self._ObsTrackLB[:,1]
if D is None:
D= 1.
trackD= numpy.ones(nTrackPoints)
elif interp:
trackD= self._interpolatedObsTrackLB[:,2]
else:
trackD= self._ObsTrackLB[:,2]
if usev:
if vlos is None:
vlos= 0.
trackVlos= numpy.zeros(nTrackPoints)
elif interp:
trackVlos= self._interpolatedObsTrackLB[:,3]
else:
trackVlos= self._ObsTrackLB[:,3]
if pmll is None:
pmll= 0.
trackPmll= numpy.zeros(nTrackPoints)
elif interp:
trackPmll= self._interpolatedObsTrackLB[:,4]
else:
trackPmll= self._ObsTrackLB[:,4]
if pmbb is None:
pmbb= 0.
trackPmbb= numpy.zeros(nTrackPoints)
elif interp:
trackPmbb= self._interpolatedObsTrackLB[:,5]
else:
trackPmbb= self._ObsTrackLB[:,5]
#Calculate rectangular coordinates
XYZ= bovy_coords.lbd_to_XYZ(l,b,D,degree=True)
trackXYZ= bovy_coords.lbd_to_XYZ(trackL,trackB,trackD,degree=True)
if usev:
vxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(vlos,pmll,pmbb,
XYZ[0],XYZ[1],XYZ[2],
XYZ=True)
trackvxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(trackVlos,trackPmll,
trackPmbb,
trackXYZ[:,0],
trackXYZ[:,1],
trackXYZ[:,2],
XYZ=True)
#Calculate distance
dist2= (XYZ[0]-trackXYZ[:,0])**2.\
+(XYZ[1]-trackXYZ[:,1])**2.\
+(XYZ[2]-trackXYZ[:,2])**2.
if usev:
dist2+= (vxvyvz[0]-trackvxvyvz[:,0])**2.\
+(vxvyvz[1]-trackvxvyvz[:,1])**2.\
+(vxvyvz[2]-trackvxvyvz[:,2])**2.
return numpy.argmin(dist2)
def _find_closest_trackpointaA(self,Or,Op,Oz,ar,ap,az,interp=True):
"""
NAME:
_find_closest_trackpointaA
PURPOSE:
find the closest point on the stream track to a given point in
frequency-angle coordinates
INPUT:
Or,Op,Oz,ar,ap,az - phase-space coordinates of the given point
interp= (True), if True, return the index of the interpolated track
OUTPUT:
index into the track of the closest track point
HISTORY:
2013-12-22 - Written - Bovy (IAS)
"""
#Calculate angle offset along the stream parallel to the stream track
angle= numpy.hstack((ar,ap,az))
da= angle-self._progenitor_angle
dapar= self._sigMeanSign*numpy.sum(da*self._dsigomeanProgDirection)
if interp:
dist= numpy.fabs(dapar-self._interpolatedThetasTrack)
else:
dist= numpy.fabs(dapar-self._thetasTrack)
return numpy.argmin(dist)
#########DISTRIBUTION AS A FUNCTION OF ANGLE ALONG THE STREAM##################
def meanOmega(self,dangle,oned=False):
"""
NAME:
meanOmega
PURPOSE:
calculate the mean frequency as a function of angle, assuming a uniform time distribution up to a maximum time
INPUT:
dangle - angle offset
oned= (False) if True, return the 1D offset from the progenitor (along the direction of disruption)
OUTPUT:
mean Omega
HISTORY:
2013-12-01 - Written - Bovy (IAS)
"""
dOmin= dangle/self._tdisrupt
meandO= self._meandO
dO1D= ((numpy.sqrt(2./numpy.pi)*numpy.sqrt(self._sortedSigOEig[2])\
*numpy.exp(-0.5*(meandO-dOmin)**2.\
/self._sortedSigOEig[2])/
(1.+special.erf((meandO-dOmin)\
/numpy.sqrt(2.*self._sortedSigOEig[2]))))\
+meandO)
if oned: return dO1D
else:
return self._progenitor_Omega+dO1D*self._dsigomeanProgDirection\
*self._sigMeanSign
def sigOmega(self,dangle):
"""
NAME:
           sigOmega
PURPOSE:
calculate the 1D sigma in frequency as a function of angle, assuming a uniform time distribution up to a maximum time
INPUT:
dangle - angle offset
OUTPUT:
sigma Omega
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
dOmin= dangle/self._tdisrupt
meandO= self._meandO
sO1D2= ((numpy.sqrt(2./numpy.pi)*numpy.sqrt(self._sortedSigOEig[2])\
*(meandO+dOmin)\
*numpy.exp(-0.5*(meandO-dOmin)**2.\
/self._sortedSigOEig[2])/
(1.+special.erf((meandO-dOmin)\
/numpy.sqrt(2.*self._sortedSigOEig[2]))))\
+meandO**2.+self._sortedSigOEig[2])
mO= self.meanOmega(dangle,oned=True)
return numpy.sqrt(sO1D2-mO**2.)
def ptdAngle(self,t,dangle):
"""
NAME:
           ptdAngle
PURPOSE:
return the probability of a given stripping time at a given angle along the stream
INPUT:
t - stripping time
dangle - angle offset along the stream
OUTPUT:
p(td|dangle)
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
if isinstance(t,(int,float,numpy.float32,numpy.float64)):
t= numpy.array([t])
out= numpy.zeros(len(t))
if t > 0.:
dO= dangle/t[t < self._tdisrupt]
else:
return 0.
#p(t|a) = \int dO p(O,t|a) = \int dO p(t|O,a) p(O|a) = \int dO delta (t-a/O)p(O|a) = O*2/a p(O|a); p(O|a) = \int dt p(a|O,t) p(O)p(t) = 1/O p(O)
out[t < self._tdisrupt]=\
dO**2./dangle*numpy.exp(-0.5*(dO-self._meandO)**2.\
/self._sortedSigOEig[2])/\
numpy.sqrt(self._sortedSigOEig[2])
return out
def meantdAngle(self,dangle):
"""
NAME:
meantdAngle
PURPOSE:
calculate the mean stripping time at a given angle
INPUT:
dangle - angle offset along the stream
OUTPUT:
mean stripping time at this dangle
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
Tlow= dangle/(self._meandO+3.*numpy.sqrt(self._sortedSigOEig[2]))
Thigh= dangle/(self._meandO-3.*numpy.sqrt(self._sortedSigOEig[2]))
num= integrate.quad(lambda x: x*self.ptdAngle(x,dangle),
Tlow,Thigh)[0]
denom= integrate.quad(self.ptdAngle,Tlow,Thigh,(dangle,))[0]
if denom == 0.: return self._tdisrupt
elif numpy.isnan(denom): return 0.
else: return num/denom
def sigtdAngle(self,dangle):
"""
NAME:
sigtdAngle
PURPOSE:
calculate the dispersion in the stripping times at a given angle
INPUT:
dangle - angle offset along the stream
OUTPUT:
dispersion in the stripping times at this angle
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
Tlow= dangle/(self._meandO+3.*numpy.sqrt(self._sortedSigOEig[2]))
Thigh= dangle/(self._meandO-3.*numpy.sqrt(self._sortedSigOEig[2]))
numsig2= integrate.quad(lambda x: x**2.*self.ptdAngle(x,dangle),
Tlow,Thigh)[0]
nummean= integrate.quad(lambda x: x*self.ptdAngle(x,dangle),
Tlow,Thigh)[0]
denom= integrate.quad(self.ptdAngle,Tlow,Thigh,(dangle,))[0]
if denom == 0.: return numpy.nan
else: return numpy.sqrt(numsig2/denom-(nummean/denom)**2.)
def pangledAngle(self,angleperp,dangle,smallest=False):
"""
NAME:
pangledAngle
PURPOSE:
return the probability of a given perpendicular angle at a given angle along the stream
INPUT:
angleperp - perpendicular angle
dangle - angle offset along the stream
smallest= (False) calculate for smallest eigenvalue direction rather than for middle
OUTPUT:
p(angle_perp|dangle)
HISTORY:
2013-12-06 - Written - Bovy (IAS)
"""
if isinstance(angleperp,(int,float,numpy.float32,numpy.float64)):
angleperp= numpy.array([angleperp])
out= numpy.zeros(len(angleperp))
out= numpy.array([\
integrate.quad(self._pangledAnglet,0.,self._tdisrupt,
(ap,dangle,smallest))[0] for ap in angleperp])
return out
def meanangledAngle(self,dangle,smallest=False):
"""
NAME:
meanangledAngle
PURPOSE:
calculate the mean perpendicular angle at a given angle
INPUT:
dangle - angle offset along the stream
smallest= (False) calculate for smallest eigenvalue direction rather than for middle
OUTPUT:
mean perpendicular angle
HISTORY:
2013-12-06 - Written - Bovy (IAS)
"""
if smallest: eigIndx= 0
else: eigIndx= 1
aplow= numpy.amax([numpy.sqrt(self._sortedSigOEig[eigIndx])\
*self._tdisrupt*5.,
self._sigangle])
num= integrate.quad(lambda x: x*self.pangledAngle(x,dangle,smallest),
aplow,-aplow)[0]
denom= integrate.quad(self.pangledAngle,aplow,-aplow,
(dangle,smallest))[0]
if denom == 0.: return numpy.nan
else: return num/denom
def sigangledAngle(self,dangle,assumeZeroMean=True,smallest=False,
simple=False):
"""
NAME:
sigangledAngle
PURPOSE:
calculate the dispersion in the perpendicular angle at a given angle
INPUT:
dangle - angle offset along the stream
assumeZeroMean= (True) if True, assume that the mean is zero (should be)
smallest= (False) calculate for smallest eigenvalue direction rather than for middle
simple= (False), if True, return an even simpler estimate
OUTPUT:
dispersion in the perpendicular angle at this angle
HISTORY:
2013-12-06 - Written - Bovy (IAS)
"""
if smallest: eigIndx= 0
else: eigIndx= 1
if simple:
dt= self.meantdAngle(dangle)
return numpy.sqrt(self._sigangle2
+self._sortedSigOEig[eigIndx]*dt**2.)
aplow= numpy.amax([numpy.sqrt(self._sortedSigOEig[eigIndx])*self._tdisrupt*5.,
self._sigangle])
numsig2= integrate.quad(lambda x: x**2.*self.pangledAngle(x,dangle),
aplow,-aplow)[0]
if not assumeZeroMean:
nummean= integrate.quad(lambda x: x*self.pangledAngle(x,dangle),
aplow,-aplow)[0]
else:
nummean= 0.
denom= integrate.quad(self.pangledAngle,aplow,-aplow,(dangle,))[0]
if denom == 0.: return numpy.nan
else: return numpy.sqrt(numsig2/denom-(nummean/denom)**2.)
def _pangledAnglet(self,t,angleperp,dangle,smallest):
"""p(angle_perp|angle_par,time)"""
if smallest: eigIndx= 0
else: eigIndx= 1
if isinstance(angleperp,(int,float,numpy.float32,numpy.float64)):
angleperp= numpy.array([angleperp])
t= numpy.array([t])
out= numpy.zeros_like(angleperp)
tindx= t < self._tdisrupt
out[tindx]=\
numpy.exp(-0.5*angleperp[tindx]**2.\
/(t[tindx]**2.*self._sortedSigOEig[eigIndx]+self._sigangle2))/\
numpy.sqrt(t[tindx]**2.*self._sortedSigOEig[eigIndx]+self._sigangle2)\
*self.ptdAngle(t[t < self._tdisrupt],dangle)
return out
################APPROXIMATE FREQUENCY-ANGLE TRANSFORMATION#####################
def _approxaA(self,R,vR,vT,z,vz,phi,interp=True):
"""
NAME:
_approxaA
PURPOSE:
return action-angle coordinates for a point based on the linear
approximation around the stream track
INPUT:
R,vR,vT,z,vz,phi - phase-space coordinates of the given point
interp= (True), if True, use the interpolated track
OUTPUT:
(Or,Op,Oz,ar,ap,az)
HISTORY:
2013-12-03 - Written - Bovy (IAS)
"""
if isinstance(R,(int,float,numpy.float32,numpy.float64)): #Scalar input
R= numpy.array([R])
vR= numpy.array([vR])
vT= numpy.array([vT])
z= numpy.array([z])
vz= numpy.array([vz])
phi= numpy.array([phi])
closestIndx= [self._find_closest_trackpoint(R[ii],vR[ii],vT[ii],
z[ii],vz[ii],phi[ii],
interp=interp,
xy=False)
for ii in range(len(R))]
out= numpy.empty((6,len(R)))
for ii in range(len(R)):
dxv= numpy.empty(6)
if interp:
dxv[0]= R[ii]-self._interpolatedObsTrack[closestIndx[ii],0]
dxv[1]= vR[ii]-self._interpolatedObsTrack[closestIndx[ii],1]
dxv[2]= vT[ii]-self._interpolatedObsTrack[closestIndx[ii],2]
dxv[3]= z[ii]-self._interpolatedObsTrack[closestIndx[ii],3]
dxv[4]= vz[ii]-self._interpolatedObsTrack[closestIndx[ii],4]
dxv[5]= phi[ii]-self._interpolatedObsTrack[closestIndx[ii],5]
jacIndx= self._find_closest_trackpoint(R[ii],vR[ii],vT[ii],
z[ii],vz[ii],phi[ii],
interp=False,
xy=False)
else:
dxv[0]= R[ii]-self._ObsTrack[closestIndx[ii],0]
dxv[1]= vR[ii]-self._ObsTrack[closestIndx[ii],1]
dxv[2]= vT[ii]-self._ObsTrack[closestIndx[ii],2]
dxv[3]= z[ii]-self._ObsTrack[closestIndx[ii],3]
dxv[4]= vz[ii]-self._ObsTrack[closestIndx[ii],4]
dxv[5]= phi[ii]-self._ObsTrack[closestIndx[ii],5]
jacIndx= closestIndx[ii]
#Make sure phi hasn't wrapped around
if dxv[5] > numpy.pi:
dxv[5]-= 2.*numpy.pi
elif dxv[5] < -numpy.pi:
dxv[5]+= 2.*numpy.pi
#Apply closest jacobian
out[:,ii]= numpy.dot(self._alljacsTrack[jacIndx,:,:],
dxv)
if interp:
out[:,ii]+= self._interpolatedObsTrackAA[closestIndx[ii]]
else:
out[:,ii]+= self._ObsTrackAA[closestIndx[ii]]
return out
def _approxaAInv(self,Or,Op,Oz,ar,ap,az,interp=True):
"""
NAME:
_approxaAInv
PURPOSE:
return R,vR,... coordinates for a point based on the linear
approximation around the stream track
INPUT:
Or,Op,Oz,ar,ap,az - phase space coordinates in frequency-angle
space
interp= (True), if True, use the interpolated track
OUTPUT:
(R,vR,vT,z,vz,phi)
HISTORY:
2013-12-22 - Written - Bovy (IAS)
"""
if isinstance(Or,(int,float,numpy.float32,numpy.float64)): #Scalar input
Or= numpy.array([Or])
Op= numpy.array([Op])
Oz= numpy.array([Oz])
ar= numpy.array([ar])
ap= numpy.array([ap])
az= numpy.array([az])
#Calculate apar, angle offset along the stream
closestIndx= [self._find_closest_trackpointaA(Or[ii],Op[ii],Oz[ii],
ar[ii],ap[ii],az[ii],
interp=interp)\
for ii in range(len(Or))]
out= numpy.empty((6,len(Or)))
for ii in range(len(Or)):
dOa= numpy.empty(6)
if interp:
dOa[0]= Or[ii]-self._interpolatedObsTrackAA[closestIndx[ii],0]
dOa[1]= Op[ii]-self._interpolatedObsTrackAA[closestIndx[ii],1]
dOa[2]= Oz[ii]-self._interpolatedObsTrackAA[closestIndx[ii],2]
dOa[3]= ar[ii]-self._interpolatedObsTrackAA[closestIndx[ii],3]
dOa[4]= ap[ii]-self._interpolatedObsTrackAA[closestIndx[ii],4]
dOa[5]= az[ii]-self._interpolatedObsTrackAA[closestIndx[ii],5]
jacIndx= self._find_closest_trackpointaA(Or[ii],Op[ii],Oz[ii],
ar[ii],ap[ii],az[ii],
interp=False)
else:
dOa[0]= Or[ii]-self._ObsTrackAA[closestIndx[ii],0]
dOa[1]= Op[ii]-self._ObsTrackAA[closestIndx[ii],1]
dOa[2]= Oz[ii]-self._ObsTrackAA[closestIndx[ii],2]
dOa[3]= ar[ii]-self._ObsTrackAA[closestIndx[ii],3]
dOa[4]= ap[ii]-self._ObsTrackAA[closestIndx[ii],4]
dOa[5]= az[ii]-self._ObsTrackAA[closestIndx[ii],5]
jacIndx= closestIndx[ii]
#Make sure the angles haven't wrapped around
if dOa[3] > numpy.pi:
dOa[3]-= 2.*numpy.pi
elif dOa[3] < -numpy.pi:
dOa[3]+= 2.*numpy.pi
if dOa[4] > numpy.pi:
dOa[4]-= 2.*numpy.pi
elif dOa[4] < -numpy.pi:
dOa[4]+= 2.*numpy.pi
if dOa[5] > numpy.pi:
dOa[5]-= 2.*numpy.pi
elif dOa[5] < -numpy.pi:
dOa[5]+= 2.*numpy.pi
#Apply closest jacobian
out[:,ii]= numpy.dot(self._allinvjacsTrack[jacIndx,:,:],
dOa)
if interp:
out[:,ii]+= self._interpolatedObsTrack[closestIndx[ii]]
else:
out[:,ii]+= self._ObsTrack[closestIndx[ii]]
return out
################################EVALUATE THE DF################################
def __call__(self,*args,**kwargs):
"""
NAME:
__call__
PURPOSE:
evaluate the DF
INPUT:
Either:
a) R,vR,vT,z,vz,phi ndarray [nobjects]
b) (Omegar,Omegaphi,Omegaz,angler,anglephi,anglez) tuple if aAInput
where:
Omegar - radial frequency
Omegaphi - azimuthal frequency
Omegaz - vertical frequency
angler - radial angle
anglephi - azimuthal angle
anglez - vertical angle
c) Orbit instance or list thereof
log= if True, return the natural log
aaInput= (False) if True, option b above
OUTPUT:
value of DF
HISTORY:
2013-12-03 - Written - Bovy (IAS)
"""
#First parse log
log= kwargs.pop('log',True)
dOmega, dangle= self.prepData4Call(*args,**kwargs)
#Omega part
dOmega4dfOmega= dOmega\
-numpy.tile(self._dsigomeanProg.T,(dOmega.shape[1],1)).T
logdfOmega= -0.5*numpy.sum(dOmega4dfOmega*
numpy.dot(self._sigomatrixinv,
dOmega4dfOmega),
axis=0)-0.5*self._sigomatrixLogdet\
+numpy.log(numpy.fabs(numpy.dot(self._dsigomeanProgDirection,dOmega)))
#Angle part
dangle2= numpy.sum(dangle**2.,axis=0)
dOmega2= numpy.sum(dOmega**2.,axis=0)
dOmegaAngle= numpy.sum(dOmega*dangle,axis=0)
logdfA= -0.5/self._sigangle2*(dangle2-dOmegaAngle**2./dOmega2)\
-2.*self._lnsigangle-0.5*numpy.log(dOmega2)
#Finite stripping part
a0= dOmegaAngle/numpy.sqrt(2.)/self._sigangle/numpy.sqrt(dOmega2)
ad= numpy.sqrt(dOmega2)/numpy.sqrt(2.)/self._sigangle\
*(self._tdisrupt-dOmegaAngle/dOmega2)
loga= numpy.log((special.erf(a0)+special.erf(ad))/2.) #divided by 2 st 0 for well-within the stream
out= logdfA+logdfOmega+loga+self._logmeandetdOdJp
if log:
return out
else:
return numpy.exp(out)
def prepData4Call(self,*args,**kwargs):
"""
NAME:
prepData4Call
PURPOSE:
prepare stream data for the __call__ method
INPUT:
__call__ inputs
OUTPUT:
(dOmega,dangle); wrt the progenitor; each [3,nobj]
HISTORY:
2013-12-04 - Written - Bovy (IAS)
"""
#First calculate the actionAngle coordinates if they're not given
#as such
freqsAngles= self._parse_call_args(*args,**kwargs)
dOmega= freqsAngles[:3,:]\
-numpy.tile(self._progenitor_Omega.T,(freqsAngles.shape[1],1)).T
dangle= freqsAngles[3:,:]\
-numpy.tile(self._progenitor_angle.T,(freqsAngles.shape[1],1)).T
#Assuming single wrap, resolve large angle differences (wraps should be marginalized over)
dangle[(dangle < -4.)]+= 2.*numpy.pi
dangle[(dangle > 4.)]-= 2.*numpy.pi
return (dOmega,dangle)
def _parse_call_args(self,*args,**kwargs):
"""Helper function to parse the arguments to the __call__ and related functions,
return [6,nobj] array of frequencies (:3) and angles (3:)"""
interp= kwargs.get('interp',self._useInterp)
if len(args) == 5:
raise IOError("Must specify phi for streamdf")
elif len(args) == 6:
if kwargs.get('aAInput',False):
if isinstance(args[0],(int,float,numpy.float32,numpy.float64)):
out= numpy.empty((6,1))
else:
out= numpy.empty((6,len(args[0])))
for ii in range(6):
out[ii,:]= args[ii]
return out
else:
return self._approxaA(*args,interp=interp)
elif isinstance(args[0],Orbit):
o= args[0]
return self._approxaA(o.R(),o.vR(),o.vT(),o.z(),o.vz(),o.phi(),
interp=interp)
elif isinstance(args[0],list) and isinstance(args[0][0],Orbit):
R, vR, vT, z, vz, phi= [], [], [], [], [], []
for o in args[0]:
R.append(o.R())
vR.append(o.vR())
vT.append(o.vT())
z.append(o.z())
vz.append(o.vz())
phi.append(o.phi())
return self._approxaA(numpy.array(R),numpy.array(vR),
numpy.array(vT),numpy.array(z),
numpy.array(vz),numpy.array(phi),
interp=interp)
def callMarg(self,xy,**kwargs):
"""
NAME:
callMarg
PURPOSE:
           evaluate the DF, marginalizing over some directions, in Galactocentric rectangular coordinates (or in observed l,b,D,vlos,pmll,pmbb coordinates)
INPUT:
xy - phase-space point [X,Y,Z,vX,vY,vZ]; the distribution of the dimensions set to None is returned
interp= (object-wide interp default) if True, use the interpolated stream track
cindx= index of the closest point on the (interpolated) stream track if not given, determined from the dimensions given
nsigma= (3) number of sigma to marginalize the DF over (approximate sigma)
ngl= (5) order of Gauss-Legendre integration
lb= (False) if True, xy contains [l,b,D,vlos,pmll,pmbb] in [deg,deg,kpc,km/s,mas/yr,mas/yr] and the marginalized PDF in these coordinates is returned
Vnorm= (220) circular velocity to normalize with when lb=True
Rnorm= (8) Galactocentric radius to normalize with when lb=True
R0= (8) Galactocentric radius of the Sun (kpc)
Zsun= (0.025) Sun's height above the plane (kpc)
vsun= ([-11.1,241.92,7.25]) Sun's motion in cylindrical coordinates (vR positive away from center)
OUTPUT:
p(xy) marginalized over missing directions in xy
HISTORY:
2013-12-16 - Written - Bovy (IAS)
"""
coordGiven= numpy.array([not x is None for x in xy],dtype='bool')
if numpy.sum(coordGiven) == 6:
raise NotImplementedError("When specifying all coordinates, please use __call__ instead of callMarg")
#First construct the Gaussian approximation at this xy
gaussmean, gaussvar= self.gaussApprox(xy,**kwargs)
cholvar, chollower= stable_cho_factor(gaussvar)
#Now Gauss-legendre integrate over missing directions
ngl= kwargs.get('ngl',5)
nsigma= kwargs.get('nsigma',3)
glx, glw= numpy.polynomial.legendre.leggauss(ngl)
coordEval= []
weightEval= []
jj= 0
baseX= (glx+1)/2.
baseX= list(baseX)
baseX.extend(-(glx+1)/2.)
baseX= numpy.array(baseX)
baseW= glw
baseW= list(baseW)
baseW.extend(glw)
baseW= numpy.array(baseW)
for ii in range(6):
if not coordGiven[ii]:
coordEval.append(nsigma*baseX)
weightEval.append(baseW)
jj+= 1
else:
coordEval.append(xy[ii]*numpy.ones(1))
weightEval.append(numpy.ones(1))
mgrid= numpy.meshgrid(*coordEval,indexing='ij')
mgridNotGiven= numpy.array([mgrid[ii].flatten() for ii in range(6)
if not coordGiven[ii]])
mgridNotGiven= numpy.dot(cholvar,mgridNotGiven)
jj= 0
if coordGiven[0]: iX= mgrid[0]
else:
iX= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
if coordGiven[1]: iY= mgrid[1]
else:
iY= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
if coordGiven[2]: iZ= mgrid[2]
else:
iZ= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
if coordGiven[3]: ivX= mgrid[3]
else:
ivX= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
if coordGiven[4]: ivY= mgrid[4]
else:
ivY= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
if coordGiven[5]: ivZ= mgrid[5]
else:
ivZ= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
iXw, iYw, iZw, ivXw, ivYw, ivZw=\
numpy.meshgrid(*weightEval,indexing='ij')
if kwargs.get('lb',False): #Convert to Galactocentric cylindrical coordinates
#Setup coordinate transformation kwargs
Vnorm= kwargs.get('Vnorm',self._Vnorm)
Rnorm= kwargs.get('Rnorm',self._Rnorm)
R0= kwargs.get('R0',self._R0)
Zsun= kwargs.get('Zsun',self._Zsun)
vsun= kwargs.get('vsun',self._vsun)
tXYZ= bovy_coords.lbd_to_XYZ(iX.flatten(),iY.flatten(),
iZ.flatten(),
degree=True)
iR,iphi,iZ= bovy_coords.XYZ_to_galcencyl(tXYZ[:,0],tXYZ[:,1],
tXYZ[:,2],
Xsun=R0,Ysun=0.,Zsun=Zsun)
tvxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(ivX.flatten(),
ivY.flatten(),
ivZ.flatten(),
tXYZ[:,0],tXYZ[:,1],
tXYZ[:,2],XYZ=True)
ivR,ivT,ivZ= bovy_coords.vxvyvz_to_galcencyl(tvxvyvz[:,0],
tvxvyvz[:,1],
tvxvyvz[:,2],
iR,iphi,iZ,
galcen=True,
vsun=vsun)
iR/= Rnorm
iZ/= Rnorm
ivR/= Vnorm
ivT/= Vnorm
ivZ/= Vnorm
else:
#Convert to cylindrical coordinates
iR,iphi,iZ=\
bovy_coords.rect_to_cyl(iX.flatten(),iY.flatten(),iZ.flatten())
ivR,ivT,ivZ=\
bovy_coords.rect_to_cyl_vec(ivX.flatten(),ivY.flatten(),
ivZ.flatten(),
iR,iphi,iZ,cyl=True)
#Add the additional Jacobian dXdY/dldb... if necessary
if kwargs.get('lb',False):
#Find the nearest track point
interp= kwargs.get('interp',self._useInterp)
if not 'cindx' in kwargs:
cindx= self._find_closest_trackpointLB(*xy,interp=interp,
usev=True)
else:
cindx= kwargs['cindx']
#Only l,b,d,... to Galactic X,Y,Z,... is necessary because going
#from Galactic to Galactocentric has Jacobian determinant 1
if interp:
addLogDet= self._interpolatedTrackLogDetJacLB[cindx]
else:
addLogDet= self._trackLogDetJacLB[cindx]
else:
addLogDet= 0.
logdf= self(iR,ivR,ivT,iZ,ivZ,iphi,log=True)
return logsumexp(logdf
+numpy.log(iXw.flatten())
+numpy.log(iYw.flatten())
+numpy.log(iZw.flatten())
+numpy.log(ivXw.flatten())
+numpy.log(ivYw.flatten())
+numpy.log(ivZw.flatten()))\
+0.5*numpy.log(numpy.linalg.det(gaussvar))\
+addLogDet
def gaussApprox(self,xy,**kwargs):
"""
NAME:
gaussApprox
PURPOSE:
return the mean and variance of a Gaussian approximation to the stream DF at a given phase-space point in Galactocentric rectangular coordinates (distribution is over missing directions)
INPUT:
xy - phase-space point [X,Y,Z,vX,vY,vZ]; the distribution of the dimensions set to None is returned
interp= (object-wide interp default) if True, use the interpolated stream track
cindx= index of the closest point on the (interpolated) stream track if not given, determined from the dimensions given
lb= (False) if True, xy contains [l,b,D,vlos,pmll,pmbb] in [deg,deg,kpc,km/s,mas/yr,mas/yr] and the Gaussian approximation in these coordinates is returned
OUTPUT:
(mean,variance) of the approximate Gaussian DF for the missing directions in xy
HISTORY:
2013-12-12 - Written - Bovy (IAS)
"""
interp= kwargs.get('interp',self._useInterp)
lb= kwargs.get('lb',False)
#What are we looking for
coordGiven= numpy.array([not x is None for x in xy],dtype='bool')
nGiven= numpy.sum(coordGiven)
#First find the nearest track point
if not 'cindx' in kwargs and lb:
cindx= self._find_closest_trackpointLB(*xy,interp=interp,
usev=True)
elif not 'cindx' in kwargs and not lb:
cindx= self._find_closest_trackpoint(*xy,xy=True,interp=interp,
usev=True)
else:
cindx= kwargs['cindx']
#Get the covariance matrix
if interp and lb:
tcov= self._interpolatedAllErrCovsLBUnscaled[cindx]
tmean= self._interpolatedObsTrackLB[cindx]
elif interp and not lb:
tcov= self._interpolatedAllErrCovsXY[cindx]
tmean= self._interpolatedObsTrackXY[cindx]
elif not interp and lb:
tcov= self._allErrCovsLBUnscaled[cindx]
tmean= self._ObsTrackLB[cindx]
elif not interp and not lb:
tcov= self._allErrCovsXY[cindx]
tmean= self._ObsTrackXY[cindx]
if lb:#Apply scale factors
tcov= copy.copy(tcov)
tcov*= numpy.tile(self._ErrCovsLBScale,(6,1))
tcov*= numpy.tile(self._ErrCovsLBScale,(6,1)).T
#Fancy indexing to recover V22, V11, and V12; V22, V11, V12 as in Appendix B of 0905.2979v1
V11indx0= numpy.array([[ii for jj in range(6-nGiven)] for ii in range(6) if not coordGiven[ii]])
V11indx1= numpy.array([[ii for ii in range(6) if not coordGiven[ii]] for jj in range(6-nGiven)])
V11= tcov[V11indx0,V11indx1]
V22indx0= numpy.array([[ii for jj in range(nGiven)] for ii in range(6) if coordGiven[ii]])
V22indx1= numpy.array([[ii for ii in range(6) if coordGiven[ii]] for jj in range(nGiven)])
V22= tcov[V22indx0,V22indx1]
V12indx0= numpy.array([[ii for jj in range(nGiven)] for ii in range(6) if not coordGiven[ii]])
V12indx1= numpy.array([[ii for ii in range(6) if coordGiven[ii]] for jj in range(6-nGiven)])
V12= tcov[V12indx0,V12indx1]
#Also get m1 and m2, again following Appendix B of 0905.2979v1
        m1= tmean[~coordGiven]
m2= tmean[coordGiven]
#conditional mean and variance
V22inv= numpy.linalg.inv(V22)
v2= numpy.array([xy[ii] for ii in range(6) if coordGiven[ii]])
condMean= m1+numpy.dot(V12,numpy.dot(V22inv,v2-m2))
condVar= V11-numpy.dot(V12,numpy.dot(V22inv,V12.T))
return (condMean,condVar)
################################SAMPLE THE DF##################################
def sample(self,n,returnaAdt=False,returndt=False,interp=None,
xy=False,lb=False,
Vnorm=None,Rnorm=None,
R0=None,Zsun=None,vsun=None):
"""
NAME:
sample
PURPOSE:
sample from the DF
INPUT:
n - number of points to return
returnaAdt= (False) if True, return (Omega,angle,dt)
           returndt= (False) if True, also return the time since the star was stripped
interp= (object-wide default) use interpolation of the stream track
xy= (False) if True, return Galactocentric rectangular coordinates
lb= (False) if True, return Galactic l,b,d,vlos,pmll,pmbb coordinates
+Coordinate transformation inputs (all default to the instance-wide
values):
Vnorm= circular velocity to normalize velocities with
Rnorm= Galactocentric radius to normalize positions with
R0= Galactocentric radius of the Sun (kpc)
Zsun= Sun's height above the plane (kpc)
vsun= Sun's motion in cylindrical coordinates (vR positive away from center)
OUTPUT:
(R,vR,vT,z,vz,phi) of points on the stream in 6,N array
HISTORY:
2013-12-22 - Written - Bovy (IAS)
"""
if interp is None:
interp= self._useInterp
#First sample frequencies
#Sample frequency along largest eigenvalue using ARS
dO1s=\
bovy_ars.bovy_ars([0.,0.],[True,False],
[self._meandO-numpy.sqrt(self._sortedSigOEig[2]),
self._meandO+numpy.sqrt(self._sortedSigOEig[2])],
_h_ars,_hp_ars,nsamples=n,
hxparams=(self._meandO,self._sortedSigOEig[2]),
maxn=100)
dO1s= numpy.array(dO1s)*self._sigMeanSign
dO2s= numpy.random.normal(size=n)*numpy.sqrt(self._sortedSigOEig[1])
dO3s= numpy.random.normal(size=n)*numpy.sqrt(self._sortedSigOEig[0])
#Rotate into dOs in R,phi,z coordinates
dO= numpy.vstack((dO3s,dO2s,dO1s))
dO= numpy.dot(self._sigomatrixEig[1][:,self._sigomatrixEigsortIndx],
dO)
Om= dO+numpy.tile(self._progenitor_Omega.T,(n,1)).T
#Also generate angles
da= numpy.random.normal(size=(3,n))*self._sigangle
#And a random time
dt= numpy.random.uniform(size=n)*self._tdisrupt
#Integrate the orbits relative to the progenitor
da+= dO*numpy.tile(dt,(3,1))
angle= da+numpy.tile(self._progenitor_angle.T,(n,1)).T
if returnaAdt:
return (Om,angle,dt)
#Propagate to R,vR,etc.
RvR= self._approxaAInv(Om[0,:],Om[1,:],Om[2,:],
angle[0,:],angle[1,:],angle[2,:],
interp=interp)
if returndt and not xy and not lb:
return (RvR,dt)
elif not xy and not lb:
return RvR
if xy:
sX= RvR[0]*numpy.cos(RvR[5])
sY= RvR[0]*numpy.sin(RvR[5])
sZ= RvR[3]
svX, svY, svZ=\
bovy_coords.cyl_to_rect_vec(RvR[1],
RvR[2],
RvR[4],
RvR[5])
out= numpy.empty((6,n))
out[0]= sX
out[1]= sY
out[2]= sZ
out[3]= svX
out[4]= svY
out[5]= svZ
if returndt:
return (out,dt)
else:
return out
if lb:
if Vnorm is None:
Vnorm= self._Vnorm
if Rnorm is None:
Rnorm= self._Rnorm
if R0 is None:
R0= self._R0
if Zsun is None:
Zsun= self._Zsun
if vsun is None:
vsun= self._vsun
XYZ= bovy_coords.galcencyl_to_XYZ(RvR[0]*Rnorm,
RvR[5],
RvR[3]*Rnorm,
Xsun=R0,Zsun=Zsun)
vXYZ= bovy_coords.galcencyl_to_vxvyvz(RvR[1]*Vnorm,
RvR[2]*Vnorm,
RvR[4]*Vnorm,
RvR[5],
vsun=vsun)
slbd=bovy_coords.XYZ_to_lbd(XYZ[0],XYZ[1],XYZ[2],
degree=True)
svlbd= bovy_coords.vxvyvz_to_vrpmllpmbb(vXYZ[0],vXYZ[1],vXYZ[2],
slbd[:,0],slbd[:,1],
slbd[:,2],
degree=True)
out= numpy.empty((6,n))
out[0]= slbd[:,0]
out[1]= slbd[:,1]
out[2]= slbd[:,2]
out[3]= svlbd[:,0]
out[4]= svlbd[:,1]
out[5]= svlbd[:,2]
if returndt:
return (out,dt)
else:
return out
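# Example draws from the DF (sketch; assumes `sdf` is an already set-up streamdf instance):
#   RvR= sdf.sample(100)          # (R,vR,vT,z,vz,phi) samples, shape (6,100)
#   lbd= sdf.sample(100,lb=True)  # same samples converted to (l,b,D,vlos,pmll,pmbb)
#
# The two module-level helpers below are used by sample() to draw the parallel
# frequency offset with adaptive-rejection sampling (ARS): _h_ars is the log of
# p(dO) proportional to dO*exp(-(dO-meandO)^2/(2*sO2)) and _hp_ars its derivative.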
def _h_ars(x,params):
"""ln p(Omega) for ARS"""
mO, sO2= params
return -0.5*(x-mO)**2./sO2+numpy.log(x)
def _hp_ars(x,params):
"""d ln p(Omega) / d Omega for ARS"""
mO, sO2= params
return -(x-mO)/sO2+1./x
def _determine_stream_track_single(aA,progenitorTrack,trackt,
progenitor_angle,sigMeanSign,
dsigomeanProgDirection,meanOmega,
thetasTrack):
#Setup output
allAcfsTrack= numpy.empty((9))
alljacsTrack= numpy.empty((6,6))
allinvjacsTrack= numpy.empty((6,6))
ObsTrack= numpy.empty((6))
ObsTrackAA= numpy.empty((6))
detdOdJ= numpy.empty(6)
#Calculate
tacfs= aA.actionsFreqsAngles(progenitorTrack(trackt),
maxn=3)
allAcfsTrack[0]= tacfs[0][0]
allAcfsTrack[1]= tacfs[1][0]
allAcfsTrack[2]= tacfs[2][0]
for jj in range(3,9):
allAcfsTrack[jj]= tacfs[jj]
tjac= calcaAJac(progenitorTrack(trackt)._orb.vxvv,
aA,
dxv=None,actionsFreqsAngles=True,
lb=False,
_initacfs=tacfs)
alljacsTrack[:,:]= tjac[3:,:]
tinvjac= numpy.linalg.inv(tjac[3:,:])
allinvjacsTrack[:,:]= tinvjac
#Also store detdOdJ
jindx= numpy.array([True,True,True,False,False,False,True,True,True],
dtype='bool')
dOdJ= numpy.dot(tjac[3:,:],numpy.linalg.inv(tjac[jindx,:]))[0:3,0:3]
detdOdJ= numpy.linalg.det(dOdJ)
theseAngles= numpy.mod(progenitor_angle\
+thetasTrack\
*sigMeanSign\
*dsigomeanProgDirection,
2.*numpy.pi)
ObsTrackAA[3:]= theseAngles
diffAngles= theseAngles-allAcfsTrack[6:]
diffAngles[(diffAngles > numpy.pi)]= diffAngles[(diffAngles > numpy.pi)]-2.*numpy.pi
diffAngles[(diffAngles < -numpy.pi)]= diffAngles[(diffAngles < -numpy.pi)]+2.*numpy.pi
thisFreq= meanOmega(thetasTrack)
ObsTrackAA[:3]= thisFreq
diffFreqs= thisFreq-allAcfsTrack[3:6]
ObsTrack[:]= numpy.dot(tinvjac,
numpy.hstack((diffFreqs,diffAngles)))
ObsTrack[0]+= \
progenitorTrack(trackt).R()
ObsTrack[1]+= \
progenitorTrack(trackt).vR()
ObsTrack[2]+= \
progenitorTrack(trackt).vT()
ObsTrack[3]+= \
progenitorTrack(trackt).z()
ObsTrack[4]+= \
progenitorTrack(trackt).vz()
ObsTrack[5]+= \
progenitorTrack(trackt).phi()
return [allAcfsTrack,alljacsTrack,allinvjacsTrack,ObsTrack,ObsTrackAA,
detdOdJ]
def _determine_stream_spread_single(sigomatrixEig,
thetasTrack,
sigOmega,
sigAngle,
allinvjacsTrack):
"""sigAngle input may either be a function that returns the dispersion in
perpendicular angle as a function of parallel angle, or a value"""
#Estimate the spread in all frequencies and angles
sigObig2= sigOmega(thetasTrack)**2.
tsigOdiag= copy.copy(sigomatrixEig[0])
tsigOdiag[numpy.argmax(tsigOdiag)]= sigObig2
tsigO= numpy.dot(sigomatrixEig[1],
numpy.dot(numpy.diag(tsigOdiag),
numpy.linalg.inv(sigomatrixEig[1])))
#angles
if hasattr(sigAngle,'__call__'):
sigangle2= sigAngle(thetasTrack)**2.
else:
sigangle2= sigAngle**2.
tsigadiag= numpy.ones(3)*sigangle2
tsigadiag[numpy.argmax(tsigOdiag)]= 1.
tsiga= numpy.dot(sigomatrixEig[1],
numpy.dot(numpy.diag(tsigadiag),
numpy.linalg.inv(sigomatrixEig[1])))
#correlations, assume half correlated for now (can be calculated)
correlations= numpy.diag(0.5*numpy.ones(3))*numpy.sqrt(tsigOdiag*tsigadiag)
correlations[numpy.argmax(tsigOdiag),numpy.argmax(tsigOdiag)]= 0.
correlations= numpy.dot(sigomatrixEig[1],
numpy.dot(correlations,
numpy.linalg.inv(sigomatrixEig[1])))
#Now convert
fullMatrix= numpy.empty((6,6))
fullMatrix[:3,:3]= tsigO
fullMatrix[3:,3:]= tsiga
fullMatrix[3:,:3]= correlations
fullMatrix[:3,3:]= correlations.T
return numpy.dot(allinvjacsTrack,numpy.dot(fullMatrix,allinvjacsTrack.T))
def calcaAJac(xv,aA,dxv=None,freqs=False,dOdJ=False,actionsFreqsAngles=False,
lb=False,coordFunc=None,
Vnorm=220.,Rnorm=8.,R0=8.,Zsun=0.025,vsun=[-11.1,8.*30.24,7.25],
_initacfs=None):
"""
NAME:
calcaAJac
PURPOSE:
calculate the Jacobian d(J,theta)/d(x,v)
INPUT:
xv - phase-space point: Either
1) [R,vR,vT,z,vz,phi]
2) [l,b,D,vlos,pmll,pmbb] (if lb=True, see below)
3) list/array of 6 numbers that can be transformed into (normalized) R,vR,vT,z,vz,phi using coordFunc
aA - actionAngle instance
dxv - infinitesimal to use (rescaled for lb, so think fractionally))
freqs= (False) if True, go to frequencies rather than actions
dOdJ= (False), actually calculate d Frequency / d action
actionsFreqsAngles= (False) if True, calculate d(action,freq.,angle)/d (xv)
lb= (False) if True, start with (l,b,D,vlos,pmll,pmbb) in (deg,deg,kpc,km/s,mas/yr,mas/yr)
Vnorm= (220) circular velocity to normalize with when lb=True
Rnorm= (8) Galactocentric radius to normalize with when lb=True
R0= (8) Galactocentric radius of the Sun (kpc)
Zsun= (0.025) Sun's height above the plane (kpc)
vsun= ([-11.1,241.92,7.25]) Sun's motion in cylindrical coordinates (vR positive away from center)
coordFunc= (None) if set, this is a function that takes xv and returns R,vR,vT,z,vz,phi in normalized units (units where vc=1 at r=1 if the potential is normalized that way, for example)
OUTPUT:
Jacobian matrix
HISTORY:
2013-11-25 - Written - Bovy (IAS)
"""
if lb:
coordFunc= lambda x: lbCoordFunc(xv,Vnorm,Rnorm,R0,Zsun,vsun)
if not coordFunc is None:
R, vR, vT, z, vz, phi= coordFunc(xv)
else:
R, vR, vT, z, vz, phi= xv[0],xv[1],xv[2],xv[3],xv[4],xv[5]
if dxv is None:
dxv= 10.**-8.*numpy.ones(6)
if lb:
#Re-scale some of the differences, to be more natural
dxv[0]*= 180./numpy.pi
dxv[1]*= 180./numpy.pi
dxv[2]*= Rnorm
dxv[3]*= Vnorm
dxv[4]*= Vnorm/4.74047/xv[2]
dxv[5]*= Vnorm/4.74047/xv[2]
if actionsFreqsAngles:
jac= numpy.zeros((9,6))
else:
jac= numpy.zeros((6,6))
if dOdJ:
jac2= numpy.zeros((6,6))
if _initacfs is None:
jr,lz,jz,Or,Ophi,Oz,ar,aphi,az\
= aA.actionsFreqsAngles(R,vR,vT,z,vz,phi,maxn=3)
else:
jr,lz,jz,Or,Ophi,Oz,ar,aphi,az\
= _initacfs
for ii in range(6):
temp= xv[ii]+dxv[ii] #Trick to make sure dxv is representable
dxv[ii]= temp-xv[ii]
xv[ii]+= dxv[ii]
if not coordFunc is None:
tR, tvR, tvT, tz, tvz, tphi= coordFunc(xv)
else:
tR, tvR, tvT, tz, tvz, tphi= xv[0],xv[1],xv[2],xv[3],xv[4],xv[5]
tjr,tlz,tjz,tOr,tOphi,tOz,tar,taphi,taz\
= aA.actionsFreqsAngles(tR,tvR,tvT,tz,tvz,tphi,maxn=3)
xv[ii]-= dxv[ii]
angleIndx= 3
if actionsFreqsAngles:
jac[0,ii]= (tjr-jr)/dxv[ii]
jac[1,ii]= (tlz-lz)/dxv[ii]
jac[2,ii]= (tjz-jz)/dxv[ii]
jac[3,ii]= (tOr-Or)/dxv[ii]
jac[4,ii]= (tOphi-Ophi)/dxv[ii]
jac[5,ii]= (tOz-Oz)/dxv[ii]
angleIndx= 6
elif freqs:
jac[0,ii]= (tOr-Or)/dxv[ii]
jac[1,ii]= (tOphi-Ophi)/dxv[ii]
jac[2,ii]= (tOz-Oz)/dxv[ii]
else:
jac[0,ii]= (tjr-jr)/dxv[ii]
jac[1,ii]= (tlz-lz)/dxv[ii]
jac[2,ii]= (tjz-jz)/dxv[ii]
if dOdJ:
jac2[0,ii]= (tOr-Or)/dxv[ii]
jac2[1,ii]= (tOphi-Ophi)/dxv[ii]
jac2[2,ii]= (tOz-Oz)/dxv[ii]
#For the angles, make sure we do not hit a turning point
if tar-ar > numpy.pi:
jac[angleIndx,ii]= (tar-ar-2.*numpy.pi)/dxv[ii]
elif tar-ar < -numpy.pi:
jac[angleIndx,ii]= (tar-ar+2.*numpy.pi)/dxv[ii]
else:
jac[angleIndx,ii]= (tar-ar)/dxv[ii]
if taphi-aphi > numpy.pi:
jac[angleIndx+1,ii]= (taphi-aphi-2.*numpy.pi)/dxv[ii]
elif taphi-aphi < -numpy.pi:
jac[angleIndx+1,ii]= (taphi-aphi+2.*numpy.pi)/dxv[ii]
else:
jac[angleIndx+1,ii]= (taphi-aphi)/dxv[ii]
        if taz-az > numpy.pi:
            jac[angleIndx+2,ii]= (taz-az-2.*numpy.pi)/dxv[ii]
        elif taz-az < -numpy.pi:
            jac[angleIndx+2,ii]= (taz-az+2.*numpy.pi)/dxv[ii]
        else:
            jac[angleIndx+2,ii]= (taz-az)/dxv[ii]
if dOdJ:
jac2[3,:]= jac[3,:]
jac2[4,:]= jac[4,:]
jac2[5,:]= jac[5,:]
jac= numpy.dot(jac2,numpy.linalg.inv(jac))[0:3,0:3]
return jac
def lbCoordFunc(xv,Vnorm,Rnorm,R0,Zsun,vsun):
#Input is (l,b,D,vlos,pmll,pmbb) in (deg,deg,kpc,km/s,mas/yr,mas/yr)
X,Y,Z= bovy_coords.lbd_to_XYZ(xv[0],xv[1],xv[2],degree=True)
R,phi,Z= bovy_coords.XYZ_to_galcencyl(X,Y,Z,
Xsun=R0,Ysun=0.,Zsun=Zsun)
vx,vy,vz= bovy_coords.vrpmllpmbb_to_vxvyvz(xv[3],xv[4],xv[5],
X,Y,Z,XYZ=True)
vR,vT,vZ= bovy_coords.vxvyvz_to_galcencyl(vx,vy,vz,R,phi,Z,galcen=True,
vsun=vsun)
R/= Rnorm
Z/= Rnorm
vR/= Vnorm
vT/= Vnorm
vZ/= Vnorm
return (R,vR,vT,Z,vZ,phi)
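# Minimal usage sketch for calcaAJac (illustrative; assumes `aA` is a galpy actionAngle
# instance, e.g. actionAngleIsochroneApprox, and that xv uses the same normalized units
# as the potential behind it):
#   xv= [1.,0.1,1.1,0.,0.1,0.]                       # [R,vR,vT,z,vz,phi]
#   jac= calcaAJac(xv,aA,actionsFreqsAngles=True)    # 9x6 Jacobian d(J,O,theta)/d(x,v)
#   dOdJ= calcaAJac(xv,aA,dOdJ=True)                 # 3x3 d(frequency)/d(action)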
| followthesheep/galpy | galpy/df_src/streamdf.py | Python | bsd-3-clause | 117,381 |
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
class Location(models.Model):
    address = models.CharField(max_length=255, blank=True)  # max_length is required by CharField; 255 is an assumed value
latitude = models.DecimalField(max_digits=10, decimal_places=6)
longitude = models.DecimalField(max_digits=10, decimal_places=6)
    created = models.DateTimeField(auto_now_add=True, editable=False)
    updated = models.DateTimeField(auto_now=True, editable=False)
owner = models.ForeignKey(User)
def get_absolute_url(self):
return reverse('location-detail', args=[str(self.id)])
def __str__(self):
        return '{id: %d, latitude: %s, longitude: %s}' % (
self.id,
self.latitude,
self.longitude
)
class Meta:
app_label = 'locations'
get_latest_by = 'updated'
ordering = ['updated']
verbose_name = 'location'
verbose_name_plural = 'Locations'
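# Note: get_absolute_url() assumes a URL pattern named 'location-detail' is registered
# elsewhere in the project, e.g. (sketch only, view name hypothetical):
#   url(r'^locations/(?P<pk>\d+)/$', LocationDetail.as_view(), name='location-detail')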
| derrickyoo/serve-tucson | serve_tucson/locations/models.py | Python | bsd-3-clause | 956 |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals, absolute_import
import json
import pytest
import requests
import requests.exceptions
from tests.constants import LOCALHOST_REGISTRY_HTTP, DOCKER0_REGISTRY_HTTP, MOCK, TEST_IMAGE
from tests.util import uuid_value
from osbs.utils import ImageName
from atomic_reactor.core import ContainerTasker
from atomic_reactor.constants import CONTAINER_DOCKERPY_BUILD_METHOD
from atomic_reactor.inner import DockerBuildWorkflow
from tests.constants import MOCK_SOURCE
if MOCK:
from tests.docker_mock import mock_docker
@pytest.fixture()
def temp_image_name():
return ImageName(repo=("atomic-reactor-tests-%s" % uuid_value()))
@pytest.fixture()
def is_registry_running():
"""
is docker registry running (at {docker0,lo}:5000)?
"""
try:
lo_response = requests.get(LOCALHOST_REGISTRY_HTTP)
except requests.exceptions.ConnectionError:
return False
if not lo_response.ok:
return False
try:
        d0_response = requests.get(DOCKER0_REGISTRY_HTTP)  # leap of faith
    except requests.exceptions.ConnectionError:
        return False
    if not d0_response.ok:
return False
return True
@pytest.fixture(scope="module")
def docker_tasker():
if MOCK:
mock_docker()
ct = ContainerTasker(retry_times=0)
ct.build_method = CONTAINER_DOCKERPY_BUILD_METHOD
return ct
@pytest.fixture(params=[True, False])
def reactor_config_map(request):
return request.param
@pytest.fixture(params=[True, False])
def inspect_only(request):
return request.param
@pytest.fixture
def user_params(monkeypatch):
"""
    Set a default image_tag in the USER_PARAMS env var. Any test that creates an
    instance of :class:`DockerBuildWorkflow` requires this fixture.
"""
monkeypatch.setenv('USER_PARAMS', json.dumps({'image_tag': TEST_IMAGE}))
@pytest.fixture
def workflow(user_params):
return DockerBuildWorkflow(source=MOCK_SOURCE)
@pytest.mark.optionalhook
def pytest_html_results_table_row(report, cells):
if report.passed or report.skipped:
del cells[:]
| projectatomic/atomic-reactor | tests/conftest.py | Python | bsd-3-clause | 2,306 |
import shutil
import os
import jinja2
import string
import subprocess
import re
from xen.provisioning.HdManager import HdManager
from settings.settingsLoader import OXA_XEN_SERVER_KERNEL,OXA_XEN_SERVER_INITRD,OXA_DEBIAN_INTERFACES_FILE_LOCATION,OXA_DEBIAN_UDEV_FILE_LOCATION, OXA_DEBIAN_HOSTNAME_FILE_LOCATION, OXA_DEBIAN_SECURITY_ACCESS_FILE_LOCATION
from utils.Logger import Logger
class OfeliaDebianVMConfigurator:
logger = Logger.getLogger()
''' Private methods '''
@staticmethod
def __configureInterfacesFile(vm,iFile):
#Loopback
iFile.write("auto lo\niface lo inet loopback\n\n")
#Interfaces
for inter in vm.xen_configuration.interfaces.interface :
if inter.ismgmt:
#is a mgmt interface
interfaceString = "auto "+inter.name+"\n"+\
"iface "+inter.name+" inet static\n"+\
"\taddress "+inter.ip +"\n"+\
"\tnetmask "+inter.mask+"\n"
if inter.gw != None and inter.gw != "":
interfaceString +="\tgateway "+inter.gw+"\n"
if inter.dns1 != None and inter.dns1 != "":
interfaceString+="\tdns-nameservers "+inter.dns1
if inter.dns2 != None and inter.dns2 != "":
interfaceString+=" "+inter.dns2
interfaceString +="\n\n"
iFile.write(interfaceString)
else:
#is a data interface
iFile.write("auto "+inter.name+"\n\n")
@staticmethod
def __configureUdevFile(vm,uFile):
for inter in vm.xen_configuration.interfaces.interface:
uFile.write('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="'+inter.mac+'", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="eth*", NAME="'+inter.name+'"\n')
@staticmethod
def __configureHostname(vm,hFile):
hFile.write(vm.name)
@staticmethod
def __createParavirtualizationFileHdConfigFile(vm,env):
template_name = "paraVirtualizedFileHd.pt"
template = env.get_template(template_name)
#Set vars&render
output = template.render(
kernelImg=OXA_XEN_SERVER_KERNEL,
initrdImg=OXA_XEN_SERVER_INITRD,
hdFilePath=HdManager.getHdPath(vm),
swapFilePath=HdManager.getSwapPath(vm),
vm=vm)
#write file
cfile = open(HdManager.getConfigFilePath(vm),'w')
cfile.write(output)
cfile.close()
''' Public methods '''
@staticmethod
def getIdentifier():
return OfeliaDebianVMConfigurator.__name__
@staticmethod
def _configureNetworking(vm,path):
#Configure interfaces and udev settings
try:
try:
#Backup current files
shutil.copy(path+OXA_DEBIAN_INTERFACES_FILE_LOCATION,path+OXA_DEBIAN_INTERFACES_FILE_LOCATION+".bak")
shutil.copy(path+OXA_DEBIAN_UDEV_FILE_LOCATION,path+OXA_DEBIAN_UDEV_FILE_LOCATION+".bak")
except Exception as e:
pass
with open(path+OXA_DEBIAN_INTERFACES_FILE_LOCATION,'w') as openif:
OfeliaDebianVMConfigurator.__configureInterfacesFile(vm,openif)
with open(path+OXA_DEBIAN_UDEV_FILE_LOCATION,'w') as openudev:
OfeliaDebianVMConfigurator.__configureUdevFile(vm,openudev)
except Exception as e:
OfeliaDebianVMConfigurator.logger.error(str(e))
raise Exception("Could not configure interfaces or Udev file")
@staticmethod
def _configureLDAPSettings(vm,path):
try:
file = open(path+OXA_DEBIAN_SECURITY_ACCESS_FILE_LOCATION, "r")
text = file.read()
file.close()
file = open(path+OXA_DEBIAN_SECURITY_ACCESS_FILE_LOCATION, "w")
#Scape spaces and tabs
projectName = string.replace(vm.project_name,' ','_')
projectName = string.replace(projectName,'\t','__')
file.write(text.replace("__projectId","@proj_"+vm.project_id+"_"+projectName))
file.close()
except Exception as e:
OfeliaDebianVMConfigurator.logger.error("Could not configure LDAP file!! - "+str(e))
@staticmethod
def _configureHostName(vm,path):
try:
with open(path+OXA_DEBIAN_HOSTNAME_FILE_LOCATION,'w') as openhost:
OfeliaDebianVMConfigurator.__configureHostname(vm, openhost)
except Exception as e:
OfeliaDebianVMConfigurator.logger.error("Could not configure hostname;skipping.. - "+str(e))
@staticmethod
def _configureSSHServer(vm,path):
try:
OfeliaDebianVMConfigurator.logger.debug("Regenerating SSH keys...\n Deleting old keys...")
subprocess.check_call("rm -f "+path+"/etc/ssh/ssh_host_*", shell=True, stdout=None)
#subprocess.check_call("chroot "+path+" dpkg-reconfigure openssh-server ", shell=True, stdout=None)
OfeliaDebianVMConfigurator.logger.debug("Creating SSH1 key; this may take some time...")
subprocess.check_call("ssh-keygen -q -f "+path+"/etc/ssh/ssh_host_key -N '' -t rsa1", shell=True, stdout=None)
OfeliaDebianVMConfigurator.logger.debug("Creating SSH2 RSA key; this may take some time...")
subprocess.check_call("ssh-keygen -q -f "+path+"/etc/ssh/ssh_host_rsa_key -N '' -t rsa", shell=True, stdout=None)
OfeliaDebianVMConfigurator.logger.debug("Creating SSH2 DSA key; this may take some time...")
subprocess.check_call("ssh-keygen -q -f "+path+"/etc/ssh/ssh_host_dsa_key -N '' -t dsa", shell=True, stdout=None)
except Exception as e:
OfeliaDebianVMConfigurator.logger.error("Fatal error; could not regenerate SSH keys. Aborting to prevent VM to be unreachable..."+str(e))
raise e
#Public methods
@staticmethod
def createVmConfigurationFile(vm):
#get env
template_dirs = []
template_dirs.append(os.path.join(os.path.dirname(__file__), 'templates/'))
env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dirs))
if vm.xen_configuration.hd_setup_type == "file-image" and vm.xen_configuration.virtualization_setup_type == "paravirtualization" :
OfeliaDebianVMConfigurator.__createParavirtualizationFileHdConfigFile(vm,env)
else:
raise Exception("type of file or type of virtualization not supported for the creation of xen vm configuration file")
@staticmethod
def configureVmDisk(vm, path):
if not path or not re.match(r'[\s]*\/\w+\/\w+\/.*', path,re.IGNORECASE): #For security, should never happen anyway
raise Exception("Incorrect vm path")
#Configure networking
OfeliaDebianVMConfigurator._configureNetworking(vm,path)
OfeliaDebianVMConfigurator.logger.info("Network configured successfully...")
#Configure LDAP settings
OfeliaDebianVMConfigurator._configureLDAPSettings(vm,path)
OfeliaDebianVMConfigurator.logger.info("Authentication configured successfully...")
#Configure Hostname
OfeliaDebianVMConfigurator._configureHostName(vm,path)
OfeliaDebianVMConfigurator.logger.info("Hostname configured successfully...")
#Regenerate SSH keys
OfeliaDebianVMConfigurator._configureSSHServer(vm,path)
OfeliaDebianVMConfigurator.logger.info("SSH have been keys regenerated...")
| avlach/univbris-ocf | vt_manager/src/python/agent/xen/provisioning/configurators/ofelia/OfeliaDebianVMConfigurator.py | Python | bsd-3-clause | 6,611 |
"""
This module implements atom/bond/structure-wise descriptors calculated from a
pretrained MEGNet model.
"""
import os
from typing import Dict, Union
import numpy as np
from tensorflow.keras.models import Model
from megnet.models import GraphModel, MEGNetModel
from megnet.utils.typing import StructureOrMolecule
DEFAULT_MODEL = os.path.join(os.path.dirname(__file__), "../../mvl_models/mp-2019.4.1/formation_energy.hdf5")
class MEGNetDescriptor:
"""
MEGNet descriptors. This class takes a trained model and
then compute the intermediate outputs as structure features
"""
def __init__(self, model_name: Union[str, GraphModel, MEGNetModel] = DEFAULT_MODEL, use_cache: bool = True):
"""
Args:
model_name (str or MEGNetModel): trained model. If it is
str, then only models in mvl_models are used.
use_cache (bool): whether to use cache for structure
graph calculations
"""
if isinstance(model_name, str):
model = MEGNetModel.from_file(model_name)
elif isinstance(model_name, GraphModel):
model = model_name
else:
raise ValueError("model_name only support str or GraphModel object")
layers = model.layers
important_prefix = ["meg", "set", "concatenate"]
all_names = [i.name for i in layers if any(i.name.startswith(j) for j in important_prefix)]
if any(i.startswith("megnet") for i in all_names):
self.version = "v2"
else:
self.version = "v1"
valid_outputs = [i.output for i in layers if any(i.name.startswith(j) for j in important_prefix)]
outputs = []
valid_names = []
for i, j in zip(all_names, valid_outputs):
if isinstance(j, list):
for k, l in enumerate(j):
valid_names.append(i + f"_{k}")
outputs.append(l)
else:
valid_names.append(i)
outputs.append(j)
full_model = Model(inputs=model.inputs, outputs=outputs)
model.model = full_model
self.model = model
self.valid_names = valid_names
self._cache: Dict[str, float] = {}
self.use_cache = use_cache
def _predict_structure(self, structure: StructureOrMolecule) -> np.ndarray:
graph = self.model.graph_converter.convert(structure)
inp = self.model.graph_converter.graph_to_input(graph)
return self.model.predict(inp)
def _predict_feature(self, structure: StructureOrMolecule) -> np.ndarray:
if not self.use_cache:
return self._predict_structure(structure)
s = str(structure)
if s in self._cache:
return self._cache[s]
result = self._predict_structure(structure)
self._cache[s] = result
return result
def _get_features(self, structure: StructureOrMolecule, prefix: str, level: int, index: int = None) -> np.ndarray:
name = prefix
if level is not None:
name = f"{prefix}_{level}"
if index is not None:
name += f"_{index}"
if name not in self.valid_names:
raise ValueError(f"{name} not in original megnet model")
ind = self.valid_names.index(name)
out_all = self._predict_feature(structure)
return out_all[ind][0]
def _get_updated_prefix_level(self, prefix: str, level: int):
mapping = {
"meg_net_layer": ["megnet", level - 1],
"set2_set": ["set2set_atom" if level == 1 else "set2set_bond", None],
"concatenate": ["concatenate", None],
}
if self.version == "v2":
return mapping[prefix][0], mapping[prefix][1] # type: ignore
return prefix, level
def get_atom_features(self, structure: StructureOrMolecule, level: int = 3) -> np.ndarray:
"""
Get megnet atom features from structure
Args:
structure: pymatgen structure or molecule
level: int, indicating the block number of megnet, starting
from 1
Returns:
            n x m atomic feature matrix
"""
prefix, level = self._get_updated_prefix_level("meg_net_layer", level)
return self._get_features(structure, prefix=prefix, level=level, index=0)
def get_bond_features(self, structure: StructureOrMolecule, level: int = 3) -> np.ndarray:
"""
Get bond features at megnet block level
Args:
structure: pymatgen structure
level: int
Returns:
n_bond x m bond feature matrix
"""
prefix, level = self._get_updated_prefix_level("meg_net_layer", level)
return self._get_features(structure, prefix=prefix, level=level, index=1)
def get_global_features(self, structure: StructureOrMolecule, level: int = 2) -> np.ndarray:
"""
Get state features at megnet block level
Args:
structure: pymatgen structure or molecule
level: int
Returns:
1 x m_g global feature vector
"""
prefix, level = self._get_updated_prefix_level("meg_net_layer", level)
return self._get_features(structure, prefix=prefix, level=level, index=2)
def get_set2set(self, structure: StructureOrMolecule, ftype: str = "atom") -> np.ndarray:
"""
Get set2set output as features
Args:
structure (StructureOrMolecule): pymatgen structure
or molecule
ftype (str): atom or bond
Returns:
feature matrix, each row is a vector for an atom
or bond
"""
mapping = {"atom": 1, "bond": 2}
prefix, level = self._get_updated_prefix_level("set2_set", level=mapping[ftype])
return self._get_features(structure, prefix=prefix, level=level)
def get_structure_features(self, structure: StructureOrMolecule) -> np.ndarray:
"""
Get structure level feature vector
Args:
structure (StructureOrMolecule): pymatgen structure
or molecule
Returns:
one feature vector for the structure
"""
prefix, level = self._get_updated_prefix_level("concatenate", level=1)
return self._get_features(structure, prefix=prefix, level=level)
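# Usage sketch: a minimal, hedged example of how this descriptor class is meant
# to be used, assuming pymatgen is installed and the bundled mvl_models weights
# referenced by DEFAULT_MODEL are present. Nothing below runs on import.
if __name__ == "__main__":
    from pymatgen.core.lattice import Lattice
    from pymatgen.core.structure import Structure
    # simple CsCl-type cubic structure, purely for illustration
    structure = Structure(Lattice.cubic(4.2), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
    descriptor = MEGNetDescriptor()  # loads the default formation-energy model
    print("atom features:", descriptor.get_atom_features(structure, level=3).shape)
    print("structure features:", descriptor.get_structure_features(structure).shape)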
| materialsvirtuallab/megnet | megnet/utils/descriptor.py | Python | bsd-3-clause | 6,396 |
from traitlets import Unicode, Bool
from textwrap import dedent
from .. import utils
from . import NbGraderPreprocessor
class ClearSolutions(NbGraderPreprocessor):
code_stub = Unicode(
"# YOUR CODE HERE\nraise NotImplementedError()",
config=True,
help="The code snippet that will replace code solutions")
text_stub = Unicode(
"YOUR ANSWER HERE",
config=True,
help="The text snippet that will replace written solutions")
comment_mark = Unicode(
"#",
config=True,
help="The comment mark to prefix solution delimiters")
begin_solution_delimeter = Unicode(
"## BEGIN SOLUTION",
config=True,
help="The delimiter marking the beginning of a solution (excluding comment mark)")
end_solution_delimeter = Unicode(
"## END SOLUTION",
config=True,
help="The delimiter marking the end of a solution (excluding comment mark)")
enforce_metadata = Bool(
True,
config=True,
help=dedent(
"""
            Whether or not to complain if cells containing solution regions are
not marked as solution cells. WARNING: this will potentially cause
things to break if you are using the full nbgrader pipeline. ONLY
disable this option if you are only ever planning to use nbgrader
assign.
"""
)
)
@property
def begin_solution(self):
return "{}{}".format(self.comment_mark, self.begin_solution_delimeter)
@property
def end_solution(self):
return "{}{}".format(self.comment_mark, self.end_solution_delimeter)
def _replace_solution_region(self, cell):
"""Find a region in the cell that is delimeted by
`self.begin_solution` and `self.end_solution` (e.g. ### BEGIN
SOLUTION and ### END SOLUTION). Replace that region either
with the code stub or text stub, depending the cell type.
This modifies the cell in place, and then returns True if a
solution region was replaced, and False otherwise.
"""
# pull out the cell input/source
lines = cell.source.split("\n")
if cell.cell_type == "code":
stub_lines = self.code_stub.split("\n")
else:
stub_lines = self.text_stub.split("\n")
new_lines = []
in_solution = False
replaced_solution = False
for line in lines:
# begin the solution area
if line.strip() == self.begin_solution:
# check to make sure this isn't a nested BEGIN
# SOLUTION region
if in_solution:
raise RuntimeError(
"encountered nested begin solution statements")
in_solution = True
replaced_solution = True
# replace it with the stub, indented as necessary
indent = line[:line.find(self.begin_solution)]
for stub_line in stub_lines:
new_lines.append(indent + stub_line)
# end the solution area
elif line.strip() == self.end_solution:
in_solution = False
# add lines as long as it's not in the solution area
elif not in_solution:
new_lines.append(line)
# we finished going through all the lines, but didn't find a
        # matching END SOLUTION statement
if in_solution:
raise RuntimeError("no end solution statement found")
# replace the cell source
cell.source = "\n".join(new_lines)
return replaced_solution
def preprocess(self, nb, resources):
nb, resources = super(ClearSolutions, self).preprocess(nb, resources)
if 'celltoolbar' in nb.metadata:
del nb.metadata['celltoolbar']
return nb, resources
def preprocess_cell(self, cell, resources, cell_index):
# replace solution regions with the relevant stubs
replaced_solution = self._replace_solution_region(cell)
# determine whether the cell is a solution/grade cell
is_solution = utils.is_solution(cell)
# check that it is marked as a solution cell if we replaced a solution
# region -- if it's not, then this is a problem, because the cell needs
# to be given an id
if not is_solution and replaced_solution:
if self.enforce_metadata:
raise RuntimeError(
"Solution region detected in a non-solution cell; please make sure "
"all solution regions are within solution cells."
)
# replace solution cells with the code/text stub -- but not if
# we already replaced a solution region, because that means
# there are parts of the cells that should be preserved
if is_solution and not replaced_solution:
if cell.cell_type == 'code':
cell.source = self.code_stub
else:
cell.source = self.text_stub
return cell, resources
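# Illustrative example of the transformation performed above on a code cell,
# using the default comment mark, delimiters and code stub (all configurable):
#   before:                          after:
#       def square(x):                   def square(x):
#           ### BEGIN SOLUTION               # YOUR CODE HERE
#           return x * x                     raise NotImplementedError()
#           ### END SOLUTION
# The stub is indented to match the BEGIN SOLUTION line; solution cells without
# a delimited region are replaced with the stub in their entirety.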
| EdwardJKim/nbgrader | nbgrader/preprocessors/clearsolutions.py | Python | bsd-3-clause | 5,142 |
# Implementation of RAKE - Rapid Automatic Keyword Extraction algorithm
# as described in:
# Rose, S., D. Engel, N. Cramer, and W. Cowley (2010).
# Automatic keyword extraction from individual documents.
# In M. W. Berry and J. Kogan (Eds.), Text Mining: Applications and Theory. John Wiley and Sons, Ltd.
import re
import operator
import os
BASE_DIR = (os.path.dirname(os.path.abspath(__file__)))
debug = False
def is_number(s):
try:
float(s) if '.' in s else int(s)
return True
except ValueError:
return False
def load_stop_words(stop_word_file):
"""
Utility function to load stop words from a file and return as a list of words
@param stop_word_file Path and file name of a file containing stop words.
@return list A list of stop words.
"""
stop_words = []
for line in open(stop_word_file):
if line.strip()[0:1] != "#":
for word in line.split(): # in case more than one per line
stop_words.append(word)
return stop_words
def separate_words(text, min_word_return_size):
"""
    Utility function to return a list of all words that have a length greater than a specified number of characters.
    @param text The text that must be split into words.
    @param min_word_return_size The minimum number of characters a word must have to be included.
"""
splitter = re.compile(r'[^a-zA-Z0-9_\+\-/]')
words = []
for single_word in splitter.split(text):
current_word = single_word.strip().lower()
# leave numbers in phrase, but don't count as words, since they tend to invalidate scores of their phrases
if len(current_word) > min_word_return_size and current_word != '' and not is_number(current_word):
words.append(current_word)
return words
def split_sentences(text):
"""
Utility function to return a list of sentences.
    @param text The text that must be split into sentences.
"""
sentence_delimiters = re.compile(r'[!\?;:\[\]\t\"\(\)]|\s\-\s|[^0-9],[^a-zA-Z0-9]|\.[^a-zA-Z0-9]|\.$')
sentences = sentence_delimiters.split(text)
return sentences
def build_stop_word_regex(stop_word_file_path):
stop_word_list = load_stop_words(stop_word_file_path)
stop_word_regex_list = []
for word in stop_word_list:
word_regex = r'\b'+word+r'(?![\w-])' # added look ahead for hyphen
stop_word_regex_list.append(word_regex)
stop_word_pattern = re.compile('|'.join(stop_word_regex_list), re.IGNORECASE)
return stop_word_pattern
def generate_candidate_keywords(sentence_list, stopword_pattern):
phrase_list = []
for s in sentence_list:
tmp = re.sub(stopword_pattern, '|', s.strip())
phrases = tmp.split("|")
for phrase in phrases:
phrase = phrase.strip().lower()
if phrase != "":
phrase_list.append(phrase)
return phrase_list
def calculate_word_scores(phraseList):
word_frequency = {}
word_degree = {}
for phrase in phraseList:
word_list = separate_words(phrase, 0)
word_list_length = len(word_list)
word_list_degree = word_list_length-1
# if word_list_degree > 3: word_list_degree = 3 #exp.
for word in word_list:
word_frequency.setdefault(word, 0)
word_frequency[word] += 1
word_degree.setdefault(word, 0)
word_degree[word] += word_list_degree # orig.
# word_degree[word] += 1/(word_list_length*1.0) #exp.
for item in word_frequency:
word_degree[item] = word_degree[item]+word_frequency[item]
    # Calculate Word scores = deg(w)/freq(w)
word_score = {}
for item in word_frequency:
word_score.setdefault(item, 0)
word_score[item] = word_degree[item]/(word_frequency[item]*1.0) # orig.
# word_score[item] = word_frequency[item]/(word_degree[item] * 1.0) #exp.
return word_score
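# Worked example (illustrative): for the candidate phrases "linear constraints"
# and "linear diophantine equations", the word "linear" has frequency 2 and
# accumulates degree 1 from the first phrase and 2 from the second; adding its
# frequency gives deg("linear") = 3 + 2 = 5, so its score is 5 / 2 = 2.5.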
def generate_candidate_keyword_scores(phrase_list, word_score):
keyword_candidates = {}
for phrase in phrase_list:
keyword_candidates.setdefault(phrase, 0)
word_list = separate_words(phrase, 0)
candidate_score = 0
for word in word_list:
candidate_score += word_score[word]
keyword_candidates[phrase] = candidate_score
return keyword_candidates
class Rake(object):
def __init__(self, stop_words_path=os.path.join(BASE_DIR, "SmartStoplist.txt")):
self.stop_words_path = stop_words_path
self.__stop_words_pattern = build_stop_word_regex(stop_words_path)
def run(self, text):
sentence_list = split_sentences(text)
phrase_list = generate_candidate_keywords(sentence_list, self.__stop_words_pattern)
word_scores = calculate_word_scores(phrase_list)
keyword_candidates = generate_candidate_keyword_scores(phrase_list, word_scores)
sorted_keywords = sorted(keyword_candidates.iteritems(), key=operator.itemgetter(1), reverse=True)
return sorted_keywords
if __name__ == "__main__":
text = "Compatibility of systems of linear constraints over the set of natural numbers. Criteria of compatibility of a system of linear Diophantine equations, strict inequations, and nonstrict inequations are considered. Upper bounds for components of a minimal set of solutions and algorithms of construction of minimal generating sets of solutions for all types of systems are given. These criteria and the corresponding algorithms for constructing a minimal supporting set of solutions can be used in solving all the considered types of systems and systems of mixed types."
# Split text into sentences
sentenceList = split_sentences(text)
# stoppath = "FoxStoplist.txt" #Fox stoplist contains "numbers", so it will not find "natural numbers" like in Table 1.1
stoppath = os.path.join(BASE_DIR, "SmartStoplist.txt") # SMART stoplist misses some of the lower-scoring keywords in Figure 1.5, which means that the top 1/3 cuts off one of the 4.0 score words in Table 1.1
stopwordpattern = build_stop_word_regex(stoppath)
# generate candidate keywords
phraseList = generate_candidate_keywords(sentenceList, stopwordpattern)
# calculate individual word scores
wordscores = calculate_word_scores(phraseList)
# generate candidate keyword scores
keywordcandidates = generate_candidate_keyword_scores(phraseList, wordscores)
if debug: print keywordcandidates
sortedKeywords = sorted(keywordcandidates.iteritems(), key=operator.itemgetter(1), reverse=True)
if debug: print sortedKeywords
totalKeywords = len(sortedKeywords)
if debug: print totalKeywords
print sortedKeywords[0:(totalKeywords/3)]
rake = Rake(stoppath)
keywords = rake.run(text)
print keywords
| idf/tagr | rake_app/rake/rake.py | Python | bsd-3-clause | 6,809 |
from django.utils.datastructures import SortedDict
from bencode import bencode, bdecode
def sort_dict(D):
result = SortedDict()
for key in sorted(D.keys()):
if type(D[key]) is dict:
D[key] = sort_dict(D[key])
result[key] = D[key]
return result | abshkd/benzene | torrents/utils/__init__.py | Python | bsd-3-clause | 254 |
import klampt.math.autodiff.ad as ad
import torch,numpy as np
class TorchModuleFunction(ad.ADFunctionInterface):
"""Converts a PyTorch function to a Klamp't autodiff function class."""
def __init__(self,module):
self.module=module
self._eval_params=[]
torch.set_default_dtype(torch.float64)
def __str__(self):
return str(self.module)
def n_in(self,arg):
return -1
def n_out(self):
return -1
def eval(self,*args):
self._eval_params=[]
for a in args:
if not isinstance(a,np.ndarray):
a=np.array([a])
p=torch.Tensor(a)
p.requires_grad_(True)
self._eval_params.append(p)
try:
self._eval_result=torch.flatten(self.module(*self._eval_params))
#self._eval_result.forward()
except Exception as e:
print('Torch error: %s'%str(e))
return self._eval_result.detach().numpy()
def derivative(self,arg,*args):
#lazily check if forward has been done before
if not self._same_param(*args):
self.eval(*args)
rows=[]
for i in range(self._eval_result.shape[0]):
if self._eval_params[arg].grad is not None:
self._eval_params[arg].grad.zero_()
#this is a major performance penalty, torch does not support jacobian
#we have to do it row by row
self._eval_result[i].backward(retain_graph=True)
rows.append(self._eval_params[arg].grad.detach().numpy().flatten())
return np.vstack(rows)
def jvp(self,arg,darg,*args):
raise NotImplementedError('')
def _same_param(self,*args):
if not hasattr(self,"_eval_params"):
return False
if len(self._eval_params)!=len(args):
return False
for p,a in zip(self._eval_params,args):
pn = p.detach().numpy()
if not isinstance(a,np.ndarray):
a=np.array([a])
if pn.shape != a.shape:
return False
if (pn!=a).any():
return False
return True
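# Usage sketch (hedged): wrapping a small PyTorch module so its value and
# Jacobian can be queried through the interface above. The module, sizes and
# inputs below are made up for illustration:
#   import numpy as np, torch
#   torch.set_default_dtype(torch.float64)  # match the wrapper's dtype
#   net = torch.nn.Linear(3, 2)
#   f = TorchModuleFunction(net)
#   y = f.eval(np.zeros(3))            # forward pass -> array of shape (2,)
#   J = f.derivative(0, np.zeros(3))   # 2x3 Jacobian w.r.t. argument 0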
class ADModule(torch.autograd.Function):
"""Converts a Klamp't autodiff function call or function instance to a
PyTorch Function. The class must be created with the terminal symbols
corresponding to the PyTorch arguments to which this is called.
"""
@staticmethod
def forward(ctx,func,terminals,*args):
torch.set_default_dtype(torch.float64)
if len(args)!=len(terminals):
raise ValueError("Function %s expected to have %d arguments, instead got %d"%(str(func),len(terminals),len(args)))
if isinstance(func,ad.ADFunctionCall):
context={}
for t,a in zip(terminals,args):
context[t.name]=a.detach().numpy()
ret=func.eval(**context)
elif isinstance(func,ad.ADFunctionInterface):
context=[]
for t,a in zip(terminals,args):
context.append(a.detach().numpy())
ret=func.eval(*context)
else:
raise ValueError("f must be a ADFunctionCall or ADFunctionInterface")
ctx.saved_state=(func,terminals,context)
return torch.Tensor(ret)
@staticmethod
def backward(ctx,grad):
ret = [None,None]
func,terminals,context = ctx.saved_state
if isinstance(func,ad.ADFunctionCall):
for k in range(len(terminals)):
if isinstance(terminals[k],ad.ADTerminal):
name = terminals[k].name
else:
name = terminals[k]
deriv=torch.Tensor(func.derivative(name,**context))
ret.append(deriv.T@grad)
elif isinstance(func,ad.ADFunctionInterface):
for k in range(len(terminals)):
deriv=torch.Tensor(func.derivative(k,*context))
ret.append(deriv.T@grad)
else:
raise ValueError("f must be a ADFunctionCall or ADFunctionInterface")
return tuple(ret)
@staticmethod
def check_derivatives_torch(func,terminals,h=1e-6,rtol=1e-2,atol=1e-3):
#sample some random parameters of the appropriate length
if isinstance(func,ad.ADFunctionInterface):
params=[]
for i in range(len(terminals)):
try:
N = func.n_in(i)
if N < 0:
N = 10
except NotImplementedError:
N = 10
params.append(torch.randn(N))
else:
N = 10
params = [torch.randn(N) for i in range(len(terminals))]
for p in params:
p.requires_grad_(True)
torch.autograd.gradcheck(ADModule.apply,tuple([func,terminals]+params),eps=h,atol=atol,rtol=rtol,raise_exception=True)
def torch_to_ad(module,args):
"""Converts a PyTorch function applied to args (list of scalars or numpy
arrays) to a Klamp't autodiff function call on those arguments."""
wrapper=TorchModuleFunction(module)
return wrapper(*args)
def ad_to_torch(func,terminals=None):
"""Converts a Klamp't autodiff function call or function instance to a
PyTorch Function. If terminals is provided, this is the list of arguments
that PyTorch will expect. Otherwise, the variables in the expression
will be automatically determined by the forward traversal order."""
if terminals is None:
if isinstance(func,ad.ADFunctionCall):
terminals = func.terminals()
else:
n_args = func.n_args()
terminals = [func.argname(i) for i in range(n_args)]
else:
if isinstance(func,ad.ADFunctionCall):
fterminals = func.terminals()
if len(terminals) != len(fterminals):
raise ValueError("The number of terminals provided is incorrect")
for t in terminals:
if isinstance(t,ad.ADTerminal):
name = t.name
else:
name = t
if name not in fterminals:
raise ValueError("Invalid terminal %s, function call %s only has terminals %s"%(name,str(func),str(terminals)))
else:
try:
if len(terminals) != func.n_args():
raise ValueError("Invalid number of terminals, function %s expects %d"%(str(func),func.n_args()))
except NotImplementedError:
pass
return ADModule(func,terminals) | krishauser/Klampt | Python/klampt/math/autodiff/pytorch.py | Python | bsd-3-clause | 6,627 |
# Copyright 2008-2010 Neil Martinsen-Burrell
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Copyright 2018 Jo Bovy
#
# Made small changes to allow this module to be used in Python 3, but only
# to be able to read the files used in the mwdust package. Keep the same
# license as above
"""Defines a file-derived class to read/write Fortran unformatted files.
The assumption is that a Fortran unformatted file is being written by
the Fortran runtime as a sequence of records. Each record consists of
an integer (of the default size [usually 32 or 64 bits]) giving the
length of the following data in bytes, then the data itself, then the
same integer as before.
Examples
--------
To use the default endian and precision settings, one can just do::
>>> f = FortranFile('filename')
>>> x = f.readReals()
One can read arrays with varying precisions::
>>> f = FortranFile('filename')
>>> x = f.readInts('h')
>>> y = f.readInts('q')
>>> z = f.readReals('f')
Where the format codes are those used by Python's struct module.
One can change the default endian-ness and header precision::
>>> f = FortranFile('filename', endian='>', header_prec='l')
for a file with big-endian data whose record headers are long
integers.
"""
__docformat__ = "restructuredtext en"
import sys
_PY3= sys.version > '3'
if _PY3:
from io import FileIO
file= FileIO
import numpy
class FortranFile(file):
"""File with methods for dealing with fortran unformatted data files"""
def _get_header_length(self):
return numpy.dtype(self._header_prec).itemsize
_header_length = property(fget=_get_header_length)
def _set_endian(self,c):
"""Set endian to big (c='>') or little (c='<') or native (c='=')
:Parameters:
`c` : string
The endian-ness to use when reading from this file.
"""
if c in '<>@=':
if c == '@':
c = '='
self._endian = c
else:
raise ValueError('Cannot set endian-ness')
def _get_endian(self):
return self._endian
ENDIAN = property(fset=_set_endian,
fget=_get_endian,
doc="Possible endian values are '<', '>', '@', '='"
)
def _set_header_prec(self, prec):
if prec in 'hilq':
self._header_prec = prec
else:
raise ValueError('Cannot set header precision')
def _get_header_prec(self):
return self._header_prec
HEADER_PREC = property(fset=_set_header_prec,
fget=_get_header_prec,
doc="Possible header precisions are 'h', 'i', 'l', 'q'"
)
def __init__(self, fname, endian='@', header_prec='i', *args, **kwargs):
"""Open a Fortran unformatted file for writing.
Parameters
----------
endian : character, optional
Specify the endian-ness of the file. Possible values are
'>', '<', '@' and '='. See the documentation of Python's
            struct module for their meanings. The default is '@' (native
            byte order).
header_prec : character, optional
Specify the precision used for the record headers. Possible
values are 'h', 'i', 'l' and 'q' with their meanings from
Python's struct module. The default is 'i' (the system's
default integer).
"""
file.__init__(self, fname, *args, **kwargs)
self.ENDIAN = endian
self.HEADER_PREC = header_prec
def _read_exactly(self, num_bytes):
"""Read in exactly num_bytes, raising an error if it can't be done."""
if _PY3:
data = b''
else:
data = ''
while True:
l = len(data)
if l == num_bytes:
return data
else:
read_data = self.read(num_bytes - l)
                if not read_data:  # EOF: b'' in Python 3, '' in Python 2
raise IOError('Could not read enough data.'
' Wanted %d bytes, got %d.' % (num_bytes, l))
data += read_data
def _read_check(self):
return numpy.fromstring(self._read_exactly(self._header_length),
dtype=self.ENDIAN+self.HEADER_PREC
)[0]
def _write_check(self, number_of_bytes):
"""Write the header for the given number of bytes"""
self.write(numpy.array(number_of_bytes,
dtype=self.ENDIAN+self.HEADER_PREC,).tostring()
)
def readRecord(self):
"""Read a single fortran record"""
l = self._read_check()
data_str = self._read_exactly(l)
check_size = self._read_check()
if check_size != l:
raise IOError('Error reading record from data file')
return data_str
def writeRecord(self,s):
"""Write a record with the given bytes.
Parameters
----------
s : the string to write
"""
length_bytes = len(s)
self._write_check(length_bytes)
self.write(s)
self._write_check(length_bytes)
def readString(self):
"""Read a string."""
return self.readRecord()
def writeString(self,s):
"""Write a string
Parameters
----------
s : the string to write
"""
self.writeRecord(s)
_real_precisions = 'df'
def readReals(self, prec='f'):
"""Read in an array of real numbers.
Parameters
----------
prec : character, optional
Specify the precision of the array using character codes from
Python's struct module. Possible values are 'd' and 'f'.
"""
_numpy_precisions = {'d': numpy.float64,
'f': numpy.float32
}
if prec not in self._real_precisions:
raise ValueError('Not an appropriate precision')
data_str = self.readRecord()
return numpy.fromstring(data_str, dtype=self.ENDIAN+prec)
def writeReals(self, reals, prec='f'):
"""Write an array of floats in given precision
Parameters
----------
reals : array
Data to write
        prec : string
Character code for the precision to use in writing
"""
if prec not in self._real_precisions:
raise ValueError('Not an appropriate precision')
nums = numpy.array(reals, dtype=self.ENDIAN+prec)
self.writeRecord(nums.tostring())
_int_precisions = 'hilq'
def readInts(self, prec='i'):
"""Read an array of integers.
Parameters
----------
prec : character, optional
Specify the precision of the data to be read using
character codes from Python's struct module. Possible
values are 'h', 'i', 'l' and 'q'
"""
if prec not in self._int_precisions:
raise ValueError('Not an appropriate precision')
data_str = self.readRecord()
return numpy.fromstring(data_str, dtype=self.ENDIAN+prec)
def writeInts(self, ints, prec='i'):
"""Write an array of integers in given precision
Parameters
----------
reals : array
Data to write
prec : string
Character code for the precision to use in writing
"""
if prec not in self._int_precisions:
raise ValueError('Not an appropriate precision')
nums = numpy.array(ints, dtype=self.ENDIAN+prec)
self.writeRecord(nums.tostring())
| jobovy/mwdust | mwdust/util/fortranfile.py | Python | bsd-3-clause | 8,822 |
import pathlib
from typing import Optional
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.core.utils import process_bool_arg, process_list_arg
from cumulusci.salesforce_api.metadata import ApiDeploy
from cumulusci.salesforce_api.package_zip import MetadataPackageZipBuilder
from cumulusci.tasks.salesforce.BaseSalesforceMetadataApiTask import (
BaseSalesforceMetadataApiTask,
)
class Deploy(BaseSalesforceMetadataApiTask):
api_class = ApiDeploy
task_options = {
"path": {
"description": "The path to the metadata source to be deployed",
"required": True,
},
"unmanaged": {
"description": "If True, changes namespace_inject to replace tokens with a blank string"
},
"namespace_inject": {
"description": "If set, the namespace tokens in files and filenames are replaced with the namespace's prefix"
},
"namespace_strip": {
"description": "If set, all namespace prefixes for the namespace specified are stripped from files and filenames"
},
"check_only": {
"description": "If True, performs a test deployment (validation) of components without saving the components in the target org"
},
"test_level": {
"description": "Specifies which tests are run as part of a deployment. Valid values: NoTestRun, RunLocalTests, RunAllTestsInOrg, RunSpecifiedTests."
},
"specified_tests": {
"description": "Comma-separated list of test classes to run upon deployment. Applies only with test_level set to RunSpecifiedTests."
},
"static_resource_path": {
"description": "The path where decompressed static resources are stored. Any subdirectories found will be zipped and added to the staticresources directory of the build."
},
"namespaced_org": {
"description": "If True, the tokens %%%NAMESPACED_ORG%%% and ___NAMESPACED_ORG___ will get replaced with the namespace. The default is false causing those tokens to get stripped and replaced with an empty string. Set this if deploying to a namespaced scratch org or packaging org."
},
"clean_meta_xml": {
"description": "Defaults to True which strips the <packageVersions/> element from all meta.xml files. The packageVersion element gets added automatically by the target org and is set to whatever version is installed in the org. To disable this, set this option to False"
},
}
namespaces = {"sf": "http://soap.sforce.com/2006/04/metadata"}
def _init_options(self, kwargs):
super(Deploy, self)._init_options(kwargs)
self.check_only = process_bool_arg(self.options.get("check_only", False))
self.test_level = self.options.get("test_level")
if self.test_level and self.test_level not in [
"NoTestRun",
"RunLocalTests",
"RunAllTestsInOrg",
"RunSpecifiedTests",
]:
raise TaskOptionsError(
f"Specified test run level {self.test_level} is not valid."
)
self.specified_tests = process_list_arg(self.options.get("specified_tests", []))
if bool(self.specified_tests) != (self.test_level == "RunSpecifiedTests"):
raise TaskOptionsError(
"The specified_tests option and test_level RunSpecifiedTests must be used together."
)
self.options["namespace_inject"] = (
self.options.get("namespace_inject")
or self.project_config.project__package__namespace
)
def _get_api(self, path=None):
if not path:
path = self.options.get("path")
package_zip = self._get_package_zip(path)
if package_zip is not None:
self.logger.info("Payload size: {} bytes".format(len(package_zip)))
else:
self.logger.warning("Deployment package is empty; skipping deployment.")
return
return self.api_class(
self,
package_zip,
purge_on_delete=False,
check_only=self.check_only,
test_level=self.test_level,
run_tests=self.specified_tests,
)
def _has_namespaced_package(self, ns: Optional[str]) -> bool:
if "unmanaged" in self.options:
return not process_bool_arg(self.options.get("unmanaged", True))
return bool(ns) and ns in self.org_config.installed_packages
def _is_namespaced_org(self, ns: Optional[str]) -> bool:
if "namespaced_org" in self.options:
return process_bool_arg(self.options.get("namespaced_org", False))
return bool(ns) and ns == self.org_config.namespace
def _get_package_zip(self, path):
assert path, f"Path should be specified for {self.__class__.name}"
if not pathlib.Path(path).exists():
self.logger.warning(f"{path} not found.")
return
namespace = self.options["namespace_inject"]
options = {
**self.options,
"clean_meta_xml": process_bool_arg(
self.options.get("clean_meta_xml", True)
),
"namespace_inject": namespace,
"unmanaged": not self._has_namespaced_package(namespace),
"namespaced_org": self._is_namespaced_org(namespace),
}
package_zip = MetadataPackageZipBuilder(
path=path, options=options, logger=self.logger
)
if not package_zip.zf.namelist():
return
return package_zip.as_base64()
def freeze(self, step):
steps = super(Deploy, self).freeze(step)
for step in steps:
if step["kind"] == "other":
step["kind"] = "metadata"
return steps
| SalesforceFoundation/CumulusCI | cumulusci/tasks/salesforce/Deploy.py | Python | bsd-3-clause | 5,848 |
# Copyright the Karmabot authors and contributors.
# All rights reserved. See AUTHORS.
#
# This file is part of 'karmabot' and is distributed under the BSD license.
# See LICENSE for more details.
from karmabot.core.facets import Facet
from karmabot.core.commands import CommandSet, thing
import random
predictions = [ "As I see it, yes",
"It is certain",
"It is decidedly so",
"Most likely",
"Outlook good",
"Signs point to yes",
"Without a doubt",
"Yes",
"Yes - definitely",
"You may rely on it",
"Reply hazy, try again",
"Ask again later",
"Better not tell you now",
"Cannot predict now",
"Concentrate and ask again",
"Don't count on it",
"My reply is no",
"My sources say no",
"Outlook not so good",
"Very doubtful"]
@thing.facet_classes.register
class EightBallFacet(Facet):
name = "eightball"
commands = thing.add_child(CommandSet(name))
@classmethod
def does_attach(cls, thing):
return thing.name == "eightball"
@commands.add("shake {thing}", help="shake the magic eightball")
def shake(self, thing, context):
context.reply(random.choice(predictions) + ".")
| chromakode/karmabot | karmabot/extensions/eightball.py | Python | bsd-3-clause | 1,155 |
#! /usr/bin/env python
"""
# control_get_firmware.py: get firmware version of Gemalto readers
# Copyright (C) 2009-2012 Ludovic Rousseau
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
from smartcard.System import readers
from smartcard.pcsc.PCSCPart10 import (SCARD_SHARE_DIRECT,
SCARD_LEAVE_CARD, SCARD_CTL_CODE, getTlvProperties)
for reader in readers():
cardConnection = reader.createConnection()
cardConnection.connect(mode=SCARD_SHARE_DIRECT,
disposition=SCARD_LEAVE_CARD)
print "Reader:", reader
# properties returned by IOCTL_FEATURE_GET_TLV_PROPERTIES
properties = getTlvProperties(cardConnection)
# Gemalto devices supports a control code to get firmware
if properties['PCSCv2_PART10_PROPERTY_wIdVendor'] == 0x08E6:
get_firmware = [0x02]
IOCTL_SMARTCARD_VENDOR_IFD_EXCHANGE = SCARD_CTL_CODE(1)
res = cardConnection.control(IOCTL_SMARTCARD_VENDOR_IFD_EXCHANGE,
get_firmware)
print " Firmware:", "".join([chr(x) for x in res])
else:
print " Not a Gemalto reader"
try:
res = properties['PCSCv2_PART10_PROPERTY_sFirmwareID']
print " Firmware:", frimware
except KeyError:
print " PCSCv2_PART10_PROPERTY_sFirmwareID not supported"
| vicamo/pcsc-lite-android | UnitaryTests/control_get_firmware.py | Python | bsd-3-clause | 1,904 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-31 14:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0010_auto_20170530_1509'),
]
operations = [
migrations.AddField(
model_name='corpus',
name='date_ended',
field=models.DateField(blank=True, null=True, verbose_name='date ended'),
),
migrations.AlterField(
model_name='corpus',
name='date_started',
field=models.DateField(verbose_name='date started'),
),
]
| GeorgiaTechDHLab/TOME | news/migrations/0011_auto_20170531_1426.py | Python | bsd-3-clause | 658 |
import sys
from . import (
utils, env, defs, context, layers, parser, preprocessor, loader, analyzer,
generator)
LAYERS = (
(parser.Parser, "parse"),
(preprocessor.Preprocessor, "transform_ast"),
(loader.Loader, "expand_ast"),
(analyzer.Analyzer, "expand_ast"),
(generator.Generator, "expand_ast")
)
def _get_context_args_from_settings(string, settings):
return {
"main_file_hash": utils.get_string_hash(string),
"main_file_name": settings["main_file_name"],
"module_paths": settings["module_paths"],
"loaded_modules": settings["loaded_modules"],
"test_mode_on": settings["test_mode_on"],
"env": env.Env()
}
def _update_context_args():
return {**context.modified_context_args(), **{"env": env.Env()}}
def compile_string(string, **settings):
context_args = _get_context_args_from_settings(string, settings)
current_ast = string
for layer_cls, method_name in LAYERS:
if settings["stop_before"] == layer_cls:
return current_ast
with context.new_context(**context_args):
layer = layer_cls()
if method_name == "parse":
current_ast = layer.parse(current_ast)
else:
new_ast = getattr(layers, method_name)(
current_ast, registry=layer.get_registry())
if new_ast is not None:
current_ast = list(new_ast)
if settings["stop_after"] == layer_cls:
return current_ast
context_args = _update_context_args()
return "\n".join(current_ast)
#return current_ast
def compile_file(in_file, **settings):
result = compile_string(
utils.read_file(in_file),
main_file_name=in_file.split("/")[-1].split(".")[0], **settings)
if settings["print_ast"]:
for node in result:
print(node)
sys.exit(0)
return result
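# Example invocation (illustrative; the file name and option values are made
# up, but the keyword names mirror the settings keys read above):
#   compile_file("main.sol", module_paths=["lib/"], loaded_modules={},
#                test_mode_on=False, print_ast=False,
#                stop_before=None, stop_after=None)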
| adrian-lang/margolith | solyanka/solyanka/__init__.py | Python | bsd-3-clause | 1,936 |
# -*- coding: utf-8 -*-
from bda.plone.discount import UUID_PLONE_ROOT
from bda.plone.discount.tests import Discount_INTEGRATION_TESTING
from bda.plone.discount.tests import set_browserlayer
from plone.uuid.interfaces import IUUID
import unittest2 as unittest
class TestDiscount(unittest.TestCase):
layer = Discount_INTEGRATION_TESTING
def setUp(self):
self.portal = self.layer['portal']
self.request = self.layer['request']
set_browserlayer(self.request)
def test_plone_root_uuid(self):
self.assertEquals(IUUID(self.portal), UUID_PLONE_ROOT)
| TheVirtualLtd/bda.plone.discount | src/bda/plone/discount/tests/test_discount.py | Python | bsd-3-clause | 592 |
'''
YARN Cluster Metrics
--------------------
yarn.metrics.appsSubmitted The number of submitted apps
yarn.metrics.appsCompleted The number of completed apps
yarn.metrics.appsPending The number of pending apps
yarn.metrics.appsRunning The number of running apps
yarn.metrics.appsFailed The number of failed apps
yarn.metrics.appsKilled The number of killed apps
yarn.metrics.reservedMB The size of reserved memory
yarn.metrics.availableMB The amount of available memory
yarn.metrics.allocatedMB The amount of allocated memory
yarn.metrics.totalMB The amount of total memory
yarn.metrics.reservedVirtualCores The number of reserved virtual cores
yarn.metrics.availableVirtualCores The number of available virtual cores
yarn.metrics.allocatedVirtualCores The number of allocated virtual cores
yarn.metrics.totalVirtualCores The total number of virtual cores
yarn.metrics.containersAllocated The number of containers allocated
yarn.metrics.containersReserved The number of containers reserved
yarn.metrics.containersPending The number of containers pending
yarn.metrics.totalNodes The total number of nodes
yarn.metrics.activeNodes The number of active nodes
yarn.metrics.lostNodes The number of lost nodes
yarn.metrics.unhealthyNodes The number of unhealthy nodes
yarn.metrics.decommissionedNodes The number of decommissioned nodes
yarn.metrics.rebootedNodes The number of rebooted nodes
YARN App Metrics
----------------
yarn.app.progress The progress of the application as a percent
yarn.app.startedTime The time in which application started (in ms since epoch)
yarn.app.finishedTime The time in which the application finished (in ms since epoch)
yarn.app.elapsedTime The elapsed time since the application started (in ms)
yarn.app.allocatedMB The sum of memory in MB allocated to the applications running containers
yarn.app.allocatedVCores The sum of virtual cores allocated to the applications running containers
yarn.app.runningContainers The number of containers currently running for the application
yarn.app.memorySeconds The amount of memory the application has allocated (megabyte-seconds)
yarn.app.vcoreSeconds The amount of CPU resources the application has allocated (virtual core-seconds)
YARN Node Metrics
-----------------
yarn.node.lastHealthUpdate The last time the node reported its health (in ms since epoch)
yarn.node.usedMemoryMB The total amount of memory currently used on the node (in MB)
yarn.node.availMemoryMB The total amount of memory currently available on the node (in MB)
yarn.node.usedVirtualCores The total number of vCores currently used on the node
yarn.node.availableVirtualCores The total number of vCores available on the node
yarn.node.numContainers The total number of containers currently running on the node
YARN Capacity Scheduler Metrics
-----------------
yarn.queue.root.maxCapacity The configured maximum queue capacity in percentage for root queue
yarn.queue.root.usedCapacity The used queue capacity in percentage for root queue
yarn.queue.root.capacity The configured queue capacity in percentage for root queue
yarn.queue.numPendingApplications The number of pending applications in this queue
yarn.queue.userAMResourceLimit.memory The maximum memory resources a user can use for Application Masters (in MB)
yarn.queue.userAMResourceLimit.vCores The maximum vCpus a user can use for Application Masters
yarn.queue.absoluteCapacity The absolute capacity percentage this queue can use of entire cluster
yarn.queue.userLimitFactor The minimum user limit percent set in the configuration
yarn.queue.userLimit The user limit factor set in the configuration
yarn.queue.numApplications The number of applications currently in the queue
yarn.queue.usedAMResource.memory The memory resources used for Application Masters (in MB)
yarn.queue.usedAMResource.vCores The vCpus used for Application Masters
yarn.queue.absoluteUsedCapacity The absolute used capacity percentage this queue is using of the entire cluster
yarn.queue.resourcesUsed.memory The total memory resources this queue is using (in MB)
yarn.queue.resourcesUsed.vCores The total vCpus this queue is using
yarn.queue.AMResourceLimit.vCores The maximum vCpus this queue can use for Application Masters
yarn.queue.AMResourceLimit.memory The maximum memory resources this queue can use for Application Masters (in MB)
yarn.queue.capacity The configured queue capacity in percentage relative to its parent queue
yarn.queue.numActiveApplications The number of active applications in this queue
yarn.queue.absoluteMaxCapacity The absolute maximum capacity percentage this queue can use of the entire cluster
yarn.queue.usedCapacity The used queue capacity in percentage
yarn.queue.numContainers The number of containers being used
yarn.queue.maxCapacity The configured maximum queue capacity in percentage relative to its parent queue
yarn.queue.maxApplications The maximum number of applications this queue can have
yarn.queue.maxApplicationsPerUser The maximum number of active applications per user this queue can have
'''
# stdlib
from urlparse import urljoin, urlsplit, urlunsplit
# 3rd party
from requests.exceptions import Timeout, HTTPError, InvalidURL, ConnectionError
import requests
# Project
from checks import AgentCheck
from config import _is_affirmative
# Default settings
DEFAULT_RM_URI = 'http://localhost:8088'
DEFAULT_TIMEOUT = 5
DEFAULT_CLUSTER_NAME = 'default_cluster'
DEFAULT_COLLECT_APP_METRICS = True
MAX_DETAILED_QUEUES = 100
# Path to retrieve cluster metrics
YARN_CLUSTER_METRICS_PATH = '/ws/v1/cluster/metrics'
# Path to retrieve YARN APPS
YARN_APPS_PATH = '/ws/v1/cluster/apps'
# Path to retrieve node statistics
YARN_NODES_PATH = '/ws/v1/cluster/nodes'
# Path to retrieve queue statistics
YARN_SCHEDULER_PATH = '/ws/v1/cluster/scheduler'
# Metric types
GAUGE = 'gauge'
INCREMENT = 'increment'
# Name of the service check
SERVICE_CHECK_NAME = 'yarn.can_connect'
# Application states to collect
YARN_APPLICATION_STATES = 'RUNNING'
# Cluster metrics identifier
YARN_CLUSTER_METRICS_ELEMENT = 'clusterMetrics'
# Cluster metrics for YARN
YARN_CLUSTER_METRICS = {
'appsSubmitted': ('yarn.metrics.apps_submitted', GAUGE),
'appsCompleted': ('yarn.metrics.apps_completed', GAUGE),
'appsPending': ('yarn.metrics.apps_pending', GAUGE),
'appsRunning': ('yarn.metrics.apps_running', GAUGE),
'appsFailed': ('yarn.metrics.apps_failed', GAUGE),
'appsKilled': ('yarn.metrics.apps_killed', GAUGE),
'reservedMB': ('yarn.metrics.reserved_mb', GAUGE),
'availableMB': ('yarn.metrics.available_mb', GAUGE),
'allocatedMB': ('yarn.metrics.allocated_mb', GAUGE),
'totalMB': ('yarn.metrics.total_mb', GAUGE),
'reservedVirtualCores': ('yarn.metrics.reserved_virtual_cores', GAUGE),
'availableVirtualCores': ('yarn.metrics.available_virtual_cores', GAUGE),
'allocatedVirtualCores': ('yarn.metrics.allocated_virtual_cores', GAUGE),
'totalVirtualCores': ('yarn.metrics.total_virtual_cores', GAUGE),
'containersAllocated': ('yarn.metrics.containers_allocated', GAUGE),
'containersReserved': ('yarn.metrics.containers_reserved', GAUGE),
'containersPending': ('yarn.metrics.containers_pending', GAUGE),
'totalNodes': ('yarn.metrics.total_nodes', GAUGE),
'activeNodes': ('yarn.metrics.active_nodes', GAUGE),
'lostNodes': ('yarn.metrics.lost_nodes', GAUGE),
'unhealthyNodes': ('yarn.metrics.unhealthy_nodes', GAUGE),
'decommissionedNodes': ('yarn.metrics.decommissioned_nodes', GAUGE),
'rebootedNodes': ('yarn.metrics.rebooted_nodes', GAUGE),
}
# Application metrics for YARN
YARN_APP_METRICS = {
'progress': ('yarn.apps.progress', INCREMENT),
'startedTime': ('yarn.apps.started_time', INCREMENT),
'finishedTime': ('yarn.apps.finished_time', INCREMENT),
'elapsedTime': ('yarn.apps.elapsed_time', INCREMENT),
'allocatedMB': ('yarn.apps.allocated_mb', INCREMENT),
'allocatedVCores': ('yarn.apps.allocated_vcores', INCREMENT),
'runningContainers': ('yarn.apps.running_containers', INCREMENT),
'memorySeconds': ('yarn.apps.memory_seconds', INCREMENT),
'vcoreSeconds': ('yarn.apps.vcore_seconds', INCREMENT),
}
# Node metrics for YARN
YARN_NODE_METRICS = {
'lastHealthUpdate': ('yarn.node.last_health_update', GAUGE),
'usedMemoryMB': ('yarn.node.used_memory_mb', GAUGE),
'availMemoryMB': ('yarn.node.avail_memory_mb', GAUGE),
'usedVirtualCores': ('yarn.node.used_virtual_cores', GAUGE),
'availableVirtualCores': ('yarn.node.available_virtual_cores', GAUGE),
'numContainers': ('yarn.node.num_containers', GAUGE),
}
# Root queue metrics for YARN
YARN_ROOT_QUEUE_METRICS = {
'maxCapacity': ('yarn.queue.root.max_capacity', GAUGE),
'usedCapacity': ('yarn.queue.root.used_capacity', GAUGE),
'capacity': ('yarn.queue.root.capacity', GAUGE)
}
# Queue metrics for YARN
YARN_QUEUE_METRICS = {
'numPendingApplications': ('yarn.queue.num_pending_applications', GAUGE),
'userAMResourceLimit.memory': ('yarn.queue.user_am_resource_limit.memory', GAUGE),
'userAMResourceLimit.vCores': ('yarn.queue.user_am_resource_limit.vcores', GAUGE),
'absoluteCapacity': ('yarn.queue.absolute_capacity', GAUGE),
'userLimitFactor': ('yarn.queue.user_limit_factor', GAUGE),
'userLimit': ('yarn.queue.user_limit', GAUGE),
'numApplications': ('yarn.queue.num_applications', GAUGE),
'usedAMResource.memory': ('yarn.queue.used_am_resource.memory', GAUGE),
'usedAMResource.vCores': ('yarn.queue.used_am_resource.vcores', GAUGE),
'absoluteUsedCapacity': ('yarn.queue.absolute_used_capacity', GAUGE),
'resourcesUsed.memory': ('yarn.queue.resources_used.memory', GAUGE),
'resourcesUsed.vCores': ('yarn.queue.resources_used.vcores', GAUGE),
'AMResourceLimit.vCores': ('yarn.queue.am_resource_limit.vcores', GAUGE),
'AMResourceLimit.memory': ('yarn.queue.am_resource_limit.memory', GAUGE),
'capacity': ('yarn.queue.capacity', GAUGE),
'numActiveApplications': ('yarn.queue.num_active_applications', GAUGE),
'absoluteMaxCapacity': ('yarn.queue.absolute_max_capacity', GAUGE),
'usedCapacity' : ('yarn.queue.used_capacity', GAUGE),
'numContainers': ('yarn.queue.num_containers', GAUGE),
'maxCapacity': ('yarn.queue.max_capacity', GAUGE),
'maxApplications': ('yarn.queue.max_applications', GAUGE),
'maxApplicationsPerUser': ('yarn.queue.max_applications_per_user', GAUGE)
}
class YarnCheck(AgentCheck):
'''
Extract statistics from YARN's ResourceManger REST API
'''
_ALLOWED_APPLICATION_TAGS = [
'applicationTags',
'applicationType',
'name',
'queue',
'user'
]
def check(self, instance):
# Get properties from conf file
rm_address = instance.get('resourcemanager_uri', DEFAULT_RM_URI)
app_tags = instance.get('application_tags', {})
queue_blacklist = instance.get('queue_blacklist', [])
if type(app_tags) is not dict:
self.log.error('application_tags is incorrect: %s is not a dictionary', app_tags)
app_tags = {}
filtered_app_tags = {}
for dd_prefix, yarn_key in app_tags.iteritems():
if yarn_key in self._ALLOWED_APPLICATION_TAGS:
filtered_app_tags[dd_prefix] = yarn_key
app_tags = filtered_app_tags
# Collected by default
app_tags['app_name'] = 'name'
# Get additional tags from the conf file
tags = instance.get('tags', [])
if tags is None:
tags = []
else:
tags = list(set(tags))
# Get the cluster name from the conf file
cluster_name = instance.get('cluster_name')
if cluster_name is None:
self.warning("The cluster_name must be specified in the instance configuration, defaulting to '%s'" % (DEFAULT_CUSTER_NAME))
cluster_name = DEFAULT_CUSTER_NAME
tags.append('cluster_name:%s' % cluster_name)
# Get metrics from the Resource Manager
self._yarn_cluster_metrics(rm_address, tags)
if _is_affirmative(instance.get('collect_app_metrics', DEFAULT_COLLECT_APP_METRICS)):
self._yarn_app_metrics(rm_address, app_tags, tags)
self._yarn_node_metrics(rm_address, tags)
self._yarn_scheduler_metrics(rm_address, tags, queue_blacklist)
def _yarn_cluster_metrics(self, rm_address, addl_tags):
'''
Get metrics related to YARN cluster
'''
metrics_json = self._rest_request_to_json(rm_address, YARN_CLUSTER_METRICS_PATH)
if metrics_json:
yarn_metrics = metrics_json[YARN_CLUSTER_METRICS_ELEMENT]
if yarn_metrics is not None:
self._set_yarn_metrics_from_json(addl_tags, yarn_metrics, YARN_CLUSTER_METRICS)
def _yarn_app_metrics(self, rm_address, app_tags, addl_tags):
'''
Get metrics for running applications
'''
metrics_json = self._rest_request_to_json(
rm_address,
YARN_APPS_PATH,
states=YARN_APPLICATION_STATES
)
if (metrics_json and metrics_json['apps'] is not None and
metrics_json['apps']['app'] is not None):
for app_json in metrics_json['apps']['app']:
tags = []
for dd_tag, yarn_key in app_tags.iteritems():
try:
val = app_json[yarn_key]
if val:
tags.append("{tag}:{value}".format(
tag=dd_tag, value=val
))
except KeyError:
self.log.error("Invalid value %s for application_tag", yarn_key)
tags.extend(addl_tags)
self._set_yarn_metrics_from_json(tags, app_json, YARN_APP_METRICS)
def _yarn_node_metrics(self, rm_address, addl_tags):
'''
Get metrics related to YARN nodes
'''
metrics_json = self._rest_request_to_json(rm_address, YARN_NODES_PATH)
if (metrics_json and metrics_json['nodes'] is not None and
metrics_json['nodes']['node'] is not None):
for node_json in metrics_json['nodes']['node']:
node_id = node_json['id']
tags = ['node_id:%s' % str(node_id)]
tags.extend(addl_tags)
self._set_yarn_metrics_from_json(tags, node_json, YARN_NODE_METRICS)
def _yarn_scheduler_metrics(self, rm_address, addl_tags, queue_blacklist):
'''
Get metrics from YARN scheduler
'''
metrics_json = self._rest_request_to_json(rm_address, YARN_SCHEDULER_PATH)
try:
metrics_json = metrics_json['scheduler']['schedulerInfo']
if metrics_json['type'] == 'capacityScheduler':
self._yarn_capacity_scheduler_metrics(metrics_json, addl_tags, queue_blacklist)
except KeyError:
pass
def _yarn_capacity_scheduler_metrics(self, metrics_json, addl_tags, queue_blacklist):
'''
Get metrics from YARN scheduler if it's type is capacityScheduler
'''
tags = ['queue_name:%s' % metrics_json['queueName']]
tags.extend(addl_tags)
self._set_yarn_metrics_from_json(tags, metrics_json, YARN_ROOT_QUEUE_METRICS)
if metrics_json['queues'] is not None and metrics_json['queues']['queue'] is not None:
queues_count = 0
for queue_json in metrics_json['queues']['queue']:
queue_name = queue_json['queueName']
if queue_name in queue_blacklist:
self.log.debug('Queue "%s" is blacklisted. Ignoring it' % queue_name)
continue
queues_count += 1
if queues_count > MAX_DETAILED_QUEUES:
self.warning("Found more than 100 queues, will only send metrics on first 100 queues. " +
" Please filter the queues with the check's `queue_blacklist` parameter")
break
tags = ['queue_name:%s' % str(queue_name)]
tags.extend(addl_tags)
self._set_yarn_metrics_from_json(tags, queue_json, YARN_QUEUE_METRICS)
def _set_yarn_metrics_from_json(self, tags, metrics_json, yarn_metrics):
'''
Parse the JSON response and set the metrics
'''
for dict_path, metric in yarn_metrics.iteritems():
metric_name, metric_type = metric
metric_value = self._get_value_from_json(dict_path, metrics_json)
if metric_value is not None:
self._set_metric(metric_name,
metric_type,
metric_value,
tags)
def _get_value_from_json(self, dict_path, metrics_json):
'''
Get a value from a dictionary under N keys, represented as str("key1.key2...key{n}")
'''
for key in dict_path.split('.'):
if key in metrics_json:
metrics_json = metrics_json.get(key)
else:
return None
return metrics_json
def _set_metric(self, metric_name, metric_type, value, tags=None, device_name=None):
'''
Set a metric
'''
if metric_type == GAUGE:
self.gauge(metric_name, value, tags=tags, device_name=device_name)
elif metric_type == INCREMENT:
self.increment(metric_name, value, tags=tags, device_name=device_name)
else:
self.log.error('Metric type "%s" unknown', metric_type)
def _rest_request_to_json(self, address, object_path, *args, **kwargs):
'''
Query the given URL and return the JSON response
'''
response_json = None
service_check_tags = ['url:%s' % self._get_url_base(address)]
url = address
if object_path:
url = self._join_url_dir(url, object_path)
# Add args to the url
if args:
for directory in args:
url = self._join_url_dir(url, directory)
self.log.debug('Attempting to connect to "%s"' % url)
# Add kwargs as arguments
if kwargs:
query = '&'.join(['{0}={1}'.format(key, value) for key, value in kwargs.iteritems()])
url = urljoin(url, '?' + query)
try:
response = requests.get(url, timeout=self.default_integration_http_timeout)
response.raise_for_status()
response_json = response.json()
except Timeout as e:
self.service_check(SERVICE_CHECK_NAME,
AgentCheck.CRITICAL,
tags=service_check_tags,
message="Request timeout: {0}, {1}".format(url, e))
raise
except (HTTPError,
InvalidURL,
ConnectionError) as e:
self.service_check(SERVICE_CHECK_NAME,
AgentCheck.CRITICAL,
tags=service_check_tags,
message="Request failed: {0}, {1}".format(url, e))
raise
except ValueError as e:
self.service_check(SERVICE_CHECK_NAME,
AgentCheck.CRITICAL,
tags=service_check_tags,
message=str(e))
raise
else:
self.service_check(SERVICE_CHECK_NAME,
AgentCheck.OK,
tags=service_check_tags,
message='Connection to %s was successful' % url)
return response_json
def _join_url_dir(self, url, *args):
'''
Join a URL with multiple directories
'''
for path in args:
url = url.rstrip('/') + '/'
url = urljoin(url, path.lstrip('/'))
return url
def _get_url_base(self, url):
'''
Return the base of a URL
'''
s = urlsplit(url)
return urlunsplit([s.scheme, s.netloc, '', '', ''])
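# Example instance configuration (illustrative values; the keys match what
# check() reads above):
#   instances:
#     - resourcemanager_uri: http://localhost:8088
#       cluster_name: dev_cluster
#       collect_app_metrics: true
#       application_tags:
#         app_queue: queue          # datadog tag -> YARN application field
#       queue_blacklist:
#         - default
#       tags:
#         - env:dev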
| StackVista/sts-agent-integrations-core | yarn/check.py | Python | bsd-3-clause | 20,533 |
def extractRoontalesCom(item):
'''
Parser for 'roontales.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractRoontalesCom.py | Python | bsd-3-clause | 539 |
# See e.g. http://stackoverflow.com/a/14076841/931303
try:
import pymysql
pymysql.install_as_MySQLdb()
except ImportError:
pass
| jorgecarleitao/public-contracts | main/__init__.py | Python | bsd-3-clause | 140 |
def hanoi(n, source, helper, target):
if n > 0:
# move tower of size n - 1 to helper
hanoi(n - 1, source, target, helper)
# move disk from source peg to target peg
if source:
target.append(source.pop())
# move tower of size n-1 from helper to target
hanoi(n - 1, helper, source, target)
source = [9, 8, 7, 6, 5, 4, 3, 2, 1]
target = []
helper = []
hanoi(len(source), source, helper, target)
print source, helper, target
| talapus/Ophidian | Games/Hanoi/hanoi_prototype4.py | Python | bsd-3-clause | 486 |
import itertools
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_allclose
from pvlib import atmosphere
from pvlib import solarposition
latitude, longitude, tz, altitude = 32.2, -111, 'US/Arizona', 700
times = pd.date_range(start='20140626', end='20140626', freq='6h', tz=tz)
ephem_data = solarposition.get_solarposition(times, latitude, longitude)
# need to add physical tests instead of just functional tests
def test_pres2alt():
atmosphere.pres2alt(100000)
def test_alt2press():
    atmosphere.alt2pres(1000)
@pytest.mark.parametrize("model",
['simple', 'kasten1966', 'youngirvine1967', 'kastenyoung1989',
'gueymard1993', 'young1994', 'pickering2002'])
def test_airmass(model):
out = atmosphere.relativeairmass(ephem_data['zenith'], model)
assert isinstance(out, pd.Series)
out = atmosphere.relativeairmass(ephem_data['zenith'].values, model)
assert isinstance(out, np.ndarray)
def test_airmass_scalar():
assert not np.isnan(atmosphere.relativeairmass(10))
def test_airmass_scalar_nan():
assert np.isnan(atmosphere.relativeairmass(100))
def test_airmass_invalid():
with pytest.raises(ValueError):
atmosphere.relativeairmass(ephem_data['zenith'], 'invalid')
def test_absoluteairmass():
relative_am = atmosphere.relativeairmass(ephem_data['zenith'], 'simple')
atmosphere.absoluteairmass(relative_am)
atmosphere.absoluteairmass(relative_am, pressure=100000)
def test_absoluteairmass_numeric():
atmosphere.absoluteairmass(2)
def test_absoluteairmass_nan():
np.testing.assert_equal(np.nan, atmosphere.absoluteairmass(np.nan))
def test_gueymard94_pw():
temp_air = np.array([0, 20, 40])
relative_humidity = np.array([0, 30, 100])
temps_humids = np.array(
list(itertools.product(temp_air, relative_humidity)))
pws = atmosphere.gueymard94_pw(temps_humids[:, 0], temps_humids[:, 1])
expected = np.array(
[ 0.1 , 0.33702061, 1.12340202, 0.1 ,
1.12040963, 3.73469877, 0.1 , 3.44859767, 11.49532557])
assert_allclose(pws, expected, atol=0.01)
@pytest.mark.parametrize("module_type,expect", [
('cdte', np.array(
[[ 0.9905102 , 0.9764032 , 0.93975028],
[ 1.02928735, 1.01881074, 0.98578821],
[ 1.04750335, 1.03814456, 1.00623986]])),
('monosi', np.array(
[[ 0.9776977 , 1.02043409, 1.03574032],
[ 0.98630905, 1.03055092, 1.04736262],
[ 0.98828494, 1.03299036, 1.05026561]])),
('polysi', np.array(
[[ 0.9770408 , 1.01705849, 1.02613202],
[ 0.98992828, 1.03173953, 1.04260662],
[ 0.99352435, 1.03588785, 1.04730718]])),
('cigs', np.array(
[[ 0.9745919 , 1.02821696, 1.05067895],
[ 0.97529378, 1.02967497, 1.05289307],
[ 0.97269159, 1.02730558, 1.05075651]])),
('asi', np.array(
[[ 1.0555275 , 0.87707583, 0.72243772],
[ 1.11225204, 0.93665901, 0.78487953],
[ 1.14555295, 0.97084011, 0.81994083]]))
])
def test_first_solar_spectral_correction(module_type, expect):
ams = np.array([1, 3, 5])
pws = np.array([1, 3, 5])
ams, pws = np.meshgrid(ams, pws)
out = atmosphere.first_solar_spectral_correction(pws, ams, module_type)
assert_allclose(out, expect, atol=0.001)
def test_first_solar_spectral_correction_supplied():
# use the cdte coeffs
coeffs = (0.87102, -0.040543, -0.00929202, 0.10052, 0.073062, -0.0034187)
out = atmosphere.first_solar_spectral_correction(1, 1, coefficients=coeffs)
expected = 0.99134828
assert_allclose(out, expected, atol=1e-3)
def test_first_solar_spectral_correction_ambiguous():
with pytest.raises(TypeError):
atmosphere.first_solar_spectral_correction(1, 1)
def test_kasten96_lt():
"""Test Linke turbidity factor calculated from AOD, Pwat and AM"""
amp = np.array([1, 3, 5])
pwat = np.array([0, 2.5, 5])
aod_bb = np.array([0, 0.1, 1])
lt_expected = np.array(
[[[1.3802, 2.4102, 11.6802],
[1.16303976, 2.37303976, 13.26303976],
[1.12101907, 2.51101907, 15.02101907]],
[[2.95546945, 3.98546945, 13.25546945],
[2.17435443, 3.38435443, 14.27435443],
[1.99821967, 3.38821967, 15.89821967]],
[[3.37410769, 4.40410769, 13.67410769],
[2.44311797, 3.65311797, 14.54311797],
[2.23134152, 3.62134152, 16.13134152]]]
)
lt = atmosphere.kasten96_lt(*np.meshgrid(amp, pwat, aod_bb))
assert np.allclose(lt, lt_expected, 1e-3)
return lt
def test_angstrom_aod():
"""Test Angstrom turbidity model functions."""
aod550 = 0.15
aod1240 = 0.05
alpha = atmosphere.angstrom_alpha(aod550, 550, aod1240, 1240)
assert np.isclose(alpha, 1.3513924317859232)
aod700 = atmosphere.angstrom_aod_at_lambda(aod550, 550, alpha)
assert np.isclose(aod700, 0.10828110997681031)
def test_bird_hulstrom80_aod_bb():
"""Test Bird_Hulstrom broadband AOD."""
aod380, aod500 = 0.22072480948195175, 0.1614279181106312
bird_hulstrom = atmosphere.bird_hulstrom80_aod_bb(aod380, aod500)
assert np.isclose(0.11738229553812768, bird_hulstrom)
| uvchik/pvlib-python | pvlib/test/test_atmosphere.py | Python | bsd-3-clause | 5,239 |
from django.conf.urls import patterns, url
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from ..core.app import SatchlessApp
from . import models
class OrderApp(SatchlessApp):
app_name = 'order'
namespace = 'order'
order_model = models.Order
order_details_templates = [
'satchless/order/view.html',
'satchless/order/%(order_model)s/view.html'
]
order_list_templates = [
'satchless/order/my_orders.html',
'satchless/order/%(order_model)s/my_orders.html'
]
@method_decorator(login_required)
def index(self, request):
orders = self.order_model.objects.filter(user=request.user)
context = self.get_context_data(request, orders=orders)
format_data = {
'order_model': self.order_model._meta.model_name
}
templates = [p % format_data for p in self.order_list_templates]
return TemplateResponse(request, templates, context)
def details(self, request, order_token):
order = self.get_order(request, order_token=order_token)
context = self.get_context_data(request, order=order)
format_data = {
'order_model': order._meta.model_name
}
templates = [p % format_data for p in self.order_details_templates]
return TemplateResponse(request, templates, context)
def get_order(self, request, order_token):
if request.user.is_authenticated():
orders = self.order_model.objects.filter(user=request.user)
else:
orders = self.order_model.objects.filter(user=None)
order = get_object_or_404(orders, token=order_token)
return order
def get_urls(self, prefix=None):
prefix = prefix or self.app_name
return patterns('',
url(r'^$', self.index, name='index'),
url(r'^(?P<order_token>[0-9a-zA-Z]+)/$', self.details,
name='details'),
)
order_app = OrderApp()
| fusionbox/satchless | satchless/order/app.py | Python | bsd-3-clause | 2,122 |
"""Utility functions for handling and fetching repo archives in zip format."""
from __future__ import absolute_import
import os
import tempfile
from zipfile import ZipFile
import requests
try:
# BadZipfile was renamed to BadZipFile in Python 3.2.
from zipfile import BadZipFile
except ImportError:
from zipfile import BadZipfile as BadZipFile
from cookiecutter.exceptions import InvalidZipRepository
from cookiecutter.prompt import read_repo_password
from cookiecutter.utils import make_sure_path_exists, prompt_and_delete
def unzip(zip_uri, is_url, clone_to_dir='.', no_input=False, password=None):
"""Download and unpack a zipfile at a given URI.
This will download the zipfile to the cookiecutter repository,
and unpack into a temporary directory.
:param zip_uri: The URI for the zipfile.
:param is_url: Is the zip URI a URL or a file?
:param clone_to_dir: The cookiecutter repository directory
to put the archive into.
:param no_input: Suppress any prompts
:param password: The password to use when unpacking the repository.
"""
# Ensure that clone_to_dir exists
clone_to_dir = os.path.expanduser(clone_to_dir)
make_sure_path_exists(clone_to_dir)
if is_url:
# Build the name of the cached zipfile,
# and prompt to delete if it already exists.
identifier = zip_uri.rsplit('/', 1)[1]
zip_path = os.path.join(clone_to_dir, identifier)
if os.path.exists(zip_path):
download = prompt_and_delete(zip_path, no_input=no_input)
else:
download = True
if download:
# (Re) download the zipfile
r = requests.get(zip_uri, stream=True)
with open(zip_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
else:
# Just use the local zipfile as-is.
zip_path = os.path.abspath(zip_uri)
# Now unpack the repository. The zipfile will be unpacked
# into a temporary directory
try:
zip_file = ZipFile(zip_path)
if len(zip_file.namelist()) == 0:
raise InvalidZipRepository(
'Zip repository {} is empty'.format(zip_uri)
)
# The first record in the zipfile should be the directory entry for
# the archive. If it isn't a directory, there's a problem.
first_filename = zip_file.namelist()[0]
if not first_filename.endswith('/'):
raise InvalidZipRepository(
'Zip repository {} does not include '
'a top-level directory'.format(zip_uri)
)
# Construct the final target directory
project_name = first_filename[:-1]
unzip_base = tempfile.mkdtemp()
unzip_path = os.path.join(unzip_base, project_name)
# Extract the zip file into the temporary directory
try:
zip_file.extractall(path=unzip_base)
except RuntimeError:
# File is password protected; try to get a password from the
# environment; if that doesn't work, ask the user.
if password is not None:
try:
zip_file.extractall(
path=unzip_base,
pwd=password.encode('utf-8')
)
except RuntimeError:
raise InvalidZipRepository(
'Invalid password provided for protected repository'
)
elif no_input:
raise InvalidZipRepository(
'Unable to unlock password protected repository'
)
else:
retry = 0
while retry is not None:
try:
password = read_repo_password('Repo password')
zip_file.extractall(
path=unzip_base,
pwd=password.encode('utf-8')
)
retry = None
except RuntimeError:
retry += 1
if retry == 3:
raise InvalidZipRepository(
'Invalid password provided '
'for protected repository'
)
except BadZipFile:
raise InvalidZipRepository(
'Zip repository {} is not a valid zip archive:'.format(zip_uri)
)
return unzip_path
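# Minimal usage sketch (URL and target directory are illustrative only):
#   template_dir = unzip('https://example.com/template.zip', is_url=True,
#                        clone_to_dir='~/.cookiecutters', no_input=True)
#   # template_dir points at the unpacked top-level template directory.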
| luzfcb/cookiecutter | cookiecutter/zipfile.py | Python | bsd-3-clause | 4,640 |
from base import StreetAddressValidation, AddressValidation
UPS_XAV_CONNECTION = 'https://onlinetools.ups.com/ups.app/xml/XAV'
UPS_XAV_CONNECTION_TEST = 'https://wwwcie.ups.com/ups.app/xml/XAV'
UPS_AV_CONNECTION = 'https://onlinetools.ups.com/ups.app/xml/AV'
UPS_AV_CONNECTION_TEST = 'https://wwwcie.ups.com/ups.app/xml/AV'
| cuker/python-ups | ups/addressvalidation/__init__.py | Python | bsd-3-clause | 326 |
import code
import sys
from awesomestream.jsonrpc import Client
def main():
try:
host = sys.argv[1]
except IndexError:
host = 'http://localhost:9997/'
banner = """>>> from awesomestream.jsonrpc import Client
>>> c = Client('%s')""" % (host,)
c = Client(host)
code.interact(banner, local={'Client': Client, 'c': c})
if __name__ == '__main__':
main() | ericflo/awesomestream | awesomestream/repl.py | Python | bsd-3-clause | 391 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import uuid
import socket
import time
__appname__ = "pymessage"
__author__ = "Marco Sirabella, Owen Davies"
__copyright__ = ""
__credits__ = "Marco Sirabella, Owen Davies"
__license__ = "new BSD 3-Clause"
__version__ = "0.0.3"
__maintainers__ = "Marco Sirabella, Owen Davies"
__email__ = "[email protected], [email protected]"
__status__ = "Prototype"
__module__ = ""
address = ('localhost', 5350)
lguid = '0'
def connect():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(address)
sock.send((hex(uuid.getnode()) + '\n').encode() + bytes(False)) # ik this is such BAD CODE
print("sent")
sock.send(lguid.encode())
print('sent latest guid: {}'.format(lguid))
# contents = "latest guid +5: {}".format(lguid + '5')
msg = True
fullmsg = ''
while msg:
msg = sock.recv(16).decode() # low byte count for whatever reason
#print('mes rec: {}'.format(msg))
fullmsg += msg
print('received message: {}'.format(fullmsg))
sock.close()
connect()
| mjsir911/pymessage | client.py | Python | bsd-3-clause | 1,124 |
from diesel import Loop, fork, Application, sleep
def sleep_and_print(num):
sleep(1)
print num
sleep(1)
a.halt()
def forker():
for x in xrange(5):
fork(sleep_and_print, x)
a = Application()
a.add_loop(Loop(forker))
a.run()
| dieseldev/diesel | examples/forker.py | Python | bsd-3-clause | 255 |
# -*- coding: utf-8 -*-
"""
zine.forms
~~~~~~~~~~
The form classes the zine core uses.
:copyright: (c) 2010 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from copy import copy
from datetime import datetime
from zine.i18n import _, lazy_gettext, list_languages
from zine.application import get_application, get_request, emit_event
from zine.config import DEFAULT_VARS
from zine.database import db, posts
from zine.models import User, Group, Comment, Post, Category, Tag, \
NotificationSubscription, STATUS_DRAFT, STATUS_PUBLISHED, \
STATUS_PROTECTED, STATUS_PRIVATE, \
COMMENT_UNMODERATED, COMMENT_MODERATED, \
COMMENT_BLOCKED_USER, COMMENT_BLOCKED_SPAM, COMMENT_DELETED
from zine.parsers import render_preview
from zine.privileges import bind_privileges
from zine.notifications import send_notification_template, NEW_COMMENT, \
COMMENT_REQUIRES_MODERATION
from zine.utils import forms, log, dump_json
from zine.utils.http import redirect_to
from zine.utils.validators import ValidationError, is_valid_email, \
is_valid_url, is_valid_slug, is_not_whitespace_only
from zine.utils.redirects import register_redirect, change_url_prefix
def config_field(cfgvar, label=None, **kwargs):
"""Helper function for fetching fields from the config."""
if isinstance(cfgvar, forms.Field):
field = copy(cfgvar)
else:
field = copy(DEFAULT_VARS[cfgvar])
field._position_hint = forms._next_position_hint()
if label is not None:
field.label = label
for name, value in kwargs.iteritems():
setattr(field, name, value)
return field
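# Example: the option forms further down build their fields directly from the
# configuration, e.g.
#   blog_title = config_field('blog_title', lazy_gettext(u'Blog title'))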
class LoginForm(forms.Form):
"""The form for the login page."""
user = forms.ModelField(User, 'username', required=True, messages=dict(
not_found=lazy_gettext(u'User “%(value)s” does not exist.'),
required=lazy_gettext(u'You have to enter a username.')
), on_not_found=lambda user:
log.warning(_(u'Failed login attempt, user “%s” does not exist')
% user, 'auth')
)
password = forms.TextField(widget=forms.PasswordInput)
permanent = forms.BooleanField()
def context_validate(self, data):
if not data['user'].check_password(data['password']):
log.warning(_(u'Failed login attempt from “%s”, invalid password')
% data['user'].username, 'auth')
raise ValidationError(_('Incorrect password.'))
class ChangePasswordForm(forms.Form):
"""The form used on the password-change dialog in the admin panel."""
old_password = forms.TextField(lazy_gettext(u'Old password'), required=True,
widget=forms.PasswordInput)
new_password = forms.TextField(lazy_gettext(u'New password'), required=True,
widget=forms.PasswordInput)
check_password = forms.TextField(lazy_gettext(u'Repeat password'),
required=True,
widget=forms.PasswordInput)
def __init__(self, user, initial=None):
forms.Form.__init__(self, initial)
self.user = user
def validate_old_password(self, value):
if not self.user.check_password(value):
raise ValidationError(_('The old password you\'ve '
'entered is wrong.'))
def context_validate(self, data):
if data['new_password'] != data['check_password']:
raise ValidationError(_('The two passwords don\'t match.'))
class NewCommentForm(forms.Form):
"""New comment form for authors."""
# implementation detail: the maximum length of the column in the
# database is longer than that. However we don't want users to
# insert too long names there. The long column is reserved for
# pingbacks and such.
author = forms.TextField(lazy_gettext(u'Name*'), required=True,
max_length=100, messages=dict(
too_long=lazy_gettext(u'Your name is too long.'),
required=lazy_gettext(u'You have to enter your name.')
))
email = forms.TextField(lazy_gettext(u'Mail* (not published)'),
required=True, validators=[is_valid_email()],
messages=dict(
required=lazy_gettext(u'You have to enter a valid e-mail address.')
))
www = forms.TextField(lazy_gettext(u'Website'), validators=[is_valid_url(
message=lazy_gettext(u'You have to enter a valid URL or omit the field.')
)])
body = forms.TextField(lazy_gettext(u'Text'), min_length=2, max_length=6000,
required=True, messages=dict(
too_short=lazy_gettext(u'Your comment is too short.'),
too_long=lazy_gettext(u'Your comment is too long.'),
required=lazy_gettext(u'You have to enter a comment.')
), widget=forms.Textarea)
parent = forms.HiddenModelField(Comment)
def __init__(self, post, user, initial=None):
forms.Form.__init__(self, initial)
self.req = get_request()
self.post = post
self.user = user
# if the user is logged in the form is a bit smaller
if user.is_somebody:
del self.fields['author'], self.fields['email'], self.fields['www']
def as_widget(self):
widget = forms.Form.as_widget(self)
widget.small_form = self.user.is_somebody
return widget
def validate_parent(self, value):
if value.post != self.post:
            #_ this message is only displayed if the user tampered with
#_ the form data
raise ValidationError(_('Invalid object referenced.'))
def context_validate(self, data):
if not self.post.comments_enabled:
raise ValidationError(_('Post is closed for commenting.'))
if self.post.comments_closed:
raise ValidationError(_('Commenting is no longer possible.'))
def make_comment(self):
"""A handy helper to create a comment from the validated form."""
ip = self.req and self.req.remote_addr or '0.0.0.0'
if self.user.is_somebody:
author = self.user
email = www = None
else:
author = self['author']
email = self['email']
www = self['www']
return Comment(self.post, author, self['body'], email, www,
self['parent'], submitter_ip=ip)
def create_if_valid(self, req):
"""The one-trick pony for commenting. Passed a req it tries to
use the req data to submit a comment to the post. If the req
is not a post req or the form is invalid the return value is None,
otherwise a redirect response to the new comment.
"""
if req.method != 'POST' or not self.validate(req.form):
return
# if we don't have errors let's save it and emit an
# `before-comment-saved` event so that plugins can do
# block comments so that administrators have to approve it
comment = self.make_comment()
#! use this event to block comments before they are saved. This
#! is useful for antispam and other ways of moderation.
emit_event('before-comment-saved', req, comment)
        # Moderate Comment? Now that the spam check and everything
        # went through the processing we explicitly set it to
        # unmoderated if the blog configuration demands that
if not comment.blocked and comment.requires_moderation:
comment.status = COMMENT_UNMODERATED
comment.blocked_msg = _(u'Comment waiting for approval')
#! this is sent directly after the comment was saved. Useful if
#! you want to send mail notifications or whatever.
emit_event('after-comment-saved', req, comment)
# Commit so that make_visible_for_request can access the comment id.
db.commit()
# send out a notification if the comment is not spam. Nobody is
# interested in notifications on spam...
if not comment.is_spam:
if comment.blocked:
notification_type = COMMENT_REQUIRES_MODERATION
else:
notification_type = NEW_COMMENT
send_notification_template(notification_type,
'notifications/on_new_comment.zeml',
user=req.user, comment=comment)
# Still allow the user to see his comment if it's blocked
if comment.blocked:
comment.make_visible_for_request(req)
return redirect_to(self.post)
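# Sketch of how a view might drive NewCommentForm.create_if_valid (the view
# plumbing here is illustrative only):
#   form = NewCommentForm(post, req.user)
#   rv = form.create_if_valid(req)
#   if rv is not None:
#       return rv  # redirect to the post with the freshly saved comment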
class PluginForm(forms.Form):
"""The form for plugin activation and deactivation."""
active_plugins = forms.MultiChoiceField(widget=forms.CheckboxGroup)
disable_guard = forms.BooleanField(lazy_gettext(u'Disable plugin guard'),
help_text=lazy_gettext(u'If the plugin guard is disabled errors '
u'on plugin setup are not caught.'))
def __init__(self, initial=None):
self.app = app = get_application()
self.active_plugins.choices = sorted([(x.name, x.display_name)
for x in app.plugins.values()],
key=lambda x: x[1].lower())
if initial is None:
initial = dict(
active_plugins=[x.name for x in app.plugins.itervalues()
if x.active]
)
forms.Form.__init__(self, initial)
def apply(self):
"""Apply the changes."""
t = self.app.cfg.edit()
t['plugins'] = u', '.join(sorted(self.data['active_plugins']))
t.commit()
class RemovePluginForm(forms.Form):
"""Dummy form for plugin removing."""
def __init__(self, plugin):
forms.Form.__init__(self)
self.plugin = plugin
class PostForm(forms.Form):
"""This is the baseclass for all forms that deal with posts. There are
two builtin subclasses for the builtin content types 'entry' and 'page'.
"""
title = forms.TextField(lazy_gettext(u'Title'), max_length=150,
validators=[is_not_whitespace_only()],
required=False)
text = forms.TextField(lazy_gettext(u'Text'), max_length=65000,
widget=forms.Textarea)
status = forms.ChoiceField(lazy_gettext(u'Publication status'), choices=[
(STATUS_DRAFT, lazy_gettext(u'Draft')),
(STATUS_PUBLISHED, lazy_gettext(u'Published')),
(STATUS_PROTECTED, lazy_gettext(u'Protected')),
(STATUS_PRIVATE, lazy_gettext(u'Private'))])
pub_date = forms.DateTimeField(lazy_gettext(u'Publication date'),
help_text=lazy_gettext(u'Clear this field to update to current time'))
slug = forms.TextField(lazy_gettext(u'Slug'), validators=[is_valid_slug()],
help_text=lazy_gettext(u'Clear this field to autogenerate a new slug'))
author = forms.ModelField(User, 'username', lazy_gettext('Author'),
widget=forms.SelectBox)
tags = forms.CommaSeparated(forms.TextField(), lazy_gettext(u'Tags'))
categories = forms.Multiple(forms.ModelField(Category, 'id'),
lazy_gettext(u'Categories'),
widget=forms.CheckboxGroup)
parser = forms.ChoiceField(lazy_gettext(u'Parser'))
comments_enabled = forms.BooleanField(lazy_gettext(u'Enable comments'))
pings_enabled = forms.BooleanField(lazy_gettext(u'Enable pingbacks'))
ping_links = forms.BooleanField(lazy_gettext(u'Ping links'))
#: the content type for this field.
content_type = None
def __init__(self, post=None, initial=None):
self.app = get_application()
self.post = post
self.preview = False
if post is not None:
initial = forms.fill_dict(initial,
title=post.title,
text=post.text,
status=post.status,
pub_date=post.pub_date,
slug=post.slug,
author=post.author,
tags=[x.name for x in post.tags],
categories=[x.id for x in post.categories],
parser=post.parser,
comments_enabled=post.comments_enabled,
pings_enabled=post.pings_enabled,
ping_links=not post.parser_missing
)
else:
initial = forms.fill_dict(initial, status=STATUS_DRAFT)
# if we have a request, we can use the current user as a default
req = get_request()
if req and req.user:
initial['author'] = req.user
initial.setdefault('parser', self.app.cfg['default_parser'])
self.author.choices = [x.username for x in User.query.all()]
self.parser.choices = self.app.list_parsers()
self.parser_missing = post and post.parser_missing
if self.parser_missing:
self.parser.choices.append((post.parser, _('%s (missing)') %
post.parser.title()))
self.categories.choices = [(c.id, c.name) for c in
Category.query.all()]
forms.Form.__init__(self, initial)
        # if we have an old post and the parser is not missing and
        # it was published when the form was created we collect the old
        # links so that we don't have to ping them another time.
self._old_links = set()
if self.post is not None and not self.post.parser_missing and \
self.post.is_published:
self._old_links.update(self.post.find_urls())
def validate(self, data):
"""We only validate if we're not in preview mode."""
self.preview = 'preview' in data
return forms.Form.validate(self, data) and not self.preview
def find_new_links(self):
"""Return a list of all new links."""
for link in self.post.find_urls():
if not link in self._old_links:
yield link
def validate_slug(self, value):
"""Make sure the slug is unique."""
query = Post.query.filter_by(slug=value)
if self.post is not None:
query = query.filter(Post.id != self.post.id)
existing = query.first()
if existing is not None:
raise ValidationError(_('This slug is already in use.'))
def validate_parser(self, value):
"""Make sure the missing parser is not selected."""
if self.parser_missing and value == self.post.parser:
raise ValidationError(_('Selected parser is no longer '
'available on the system.'))
def render_preview(self):
"""Renders the preview for the post."""
return render_preview(self.data['text'], self.data['parser'])
def as_widget(self):
widget = forms.Form.as_widget(self)
widget.new = self.post is None
widget.post = self.post
widget.preview = self.preview
widget.render_preview = self.render_preview
widget.parser_missing = self.parser_missing
return widget
def make_post(self):
"""A helper function that creates a post object from the data."""
data = self.data
post = Post(data['title'], data['author'], data['text'], data['slug'],
parser=data['parser'], content_type=self.content_type,
pub_date=data['pub_date'])
post.bind_categories(data['categories'])
post.bind_tags(data['tags'])
self._set_common_attributes(post)
self.post = post
return post
def save_changes(self):
"""Save the changes back to the database. This also adds a redirect
if the slug changes.
"""
if not self.data['pub_date']:
# If user deleted publication timestamp, make a new one.
self.data['pub_date'] = datetime.utcnow()
old_slug = self.post.slug
old_parser = self.post.parser
forms.set_fields(self.post, self.data, 'title', 'author', 'parser')
if (self.data['text'] != self.post.text
or self.data['parser'] != old_parser):
self.post.text = self.data['text']
add_redirect = self.post.is_published and old_slug != self.post.slug
self.post.touch_times(self.data['pub_date'])
self.post.bind_slug(self.data['slug'])
self._set_common_attributes(self.post)
if add_redirect:
register_redirect(old_slug, self.post.slug)
def _set_common_attributes(self, post):
forms.set_fields(post, self.data, 'comments_enabled', 'pings_enabled',
'status')
post.bind_categories(self.data['categories'])
post.bind_tags(self.data['tags'])
def taglist(self):
"""Return all available tags as a JSON-encoded list."""
tags = [t.name for t in Tag.query.all()]
return dump_json(tags)
class EntryForm(PostForm):
content_type = 'entry'
def __init__(self, post=None, initial=None):
app = get_application()
PostForm.__init__(self, post, forms.fill_dict(initial,
comments_enabled=app.cfg['comments_enabled'],
pings_enabled=app.cfg['pings_enabled'],
ping_links=True
))
class PageForm(PostForm):
content_type = 'page'
class PostDeleteForm(forms.Form):
"""Baseclass for deletion forms of posts."""
def __init__(self, post=None, initial=None):
self.app = get_application()
self.post = post
forms.Form.__init__(self, initial)
def as_widget(self):
widget = forms.Form.as_widget(self)
widget.post = self.post
return widget
def delete_post(self):
"""Deletes the post from the db."""
emit_event('before-post-deleted', self.post)
db.delete(self.post)
class _CommentBoundForm(forms.Form):
"""Internal baseclass for comment bound forms."""
def __init__(self, comment, initial=None):
self.app = get_application()
self.comment = comment
forms.Form.__init__(self, initial)
def as_widget(self):
widget = forms.Form.as_widget(self)
widget.comment = self.comment
return widget
class EditCommentForm(_CommentBoundForm):
"""Form for comment editing in admin."""
author = forms.TextField(lazy_gettext(u'Author'), required=True)
email = forms.TextField(lazy_gettext(u'Email'),
validators=[is_valid_email()])
www = forms.TextField(lazy_gettext(u'Website'),
validators=[is_valid_url()])
text = forms.TextField(lazy_gettext(u'Text'), widget=forms.Textarea)
pub_date = forms.DateTimeField(lazy_gettext(u'Date'), required=True)
parser = forms.ChoiceField(lazy_gettext(u'Parser'), required=True)
blocked = forms.BooleanField(lazy_gettext(u'Block Comment'))
blocked_msg = forms.TextField(lazy_gettext(u'Reason'))
def __init__(self, comment, initial=None):
_CommentBoundForm.__init__(self, comment, forms.fill_dict(initial,
author=comment.author,
email=comment.email,
www=comment.www,
text=comment.text,
pub_date=comment.pub_date,
parser=comment.parser,
blocked=comment.blocked,
blocked_msg=comment.blocked_msg
))
self.parser.choices = self.app.list_parsers()
self.parser_missing = comment.parser_missing
if self.parser_missing and comment.parser is not None:
self.parser.choices.append((comment.parser, _('%s (missing)') %
comment.parser.title()))
def save_changes(self):
"""Save the changes back to the database."""
old_parser = self.comment.parser
forms.set_fields(self.comment, self.data, 'pub_date', 'parser',
'blocked_msg')
if (self.data['text'] != self.comment.text
or self.data['parser'] != old_parser):
self.comment.text = self.data['text']
# update status
if self.data['blocked']:
if not self.comment.blocked:
self.comment.status = COMMENT_BLOCKED_USER
else:
self.comment.status = COMMENT_MODERATED
# only apply these if the comment is not anonymous
if self.comment.anonymous:
forms.set_fields(self.comment, self.data, 'author', 'email', 'www')
class DeleteCommentForm(_CommentBoundForm):
"""Helper form that is used to delete comments."""
def delete_comment(self):
"""Deletes the comment from the db."""
delete_comment(self.comment)
class ApproveCommentForm(_CommentBoundForm):
"""Helper form for comment approvement."""
def approve_comment(self):
"""Approve the comment."""
#! plugins can use this to react to comment approvals.
emit_event('before-comment-approved', self.comment)
self.comment.status = COMMENT_MODERATED
self.comment.blocked_msg = u''
class BlockCommentForm(_CommentBoundForm):
"""Form used to block comments."""
message = forms.TextField(lazy_gettext(u'Reason'))
def __init__(self, comment, initial=None):
self.req = get_request()
_CommentBoundForm.__init__(self, comment, initial)
def block_comment(self):
msg = self.data['message']
if not msg and self.req:
msg = _(u'blocked by %s') % self.req.user.display_name
self.comment.status = COMMENT_BLOCKED_USER
        self.comment.blocked_msg = msg
class MarkCommentForm(_CommentBoundForm):
"""Form used to block comments."""
def __init__(self, comment, initial=None):
self.req = get_request()
_CommentBoundForm.__init__(self, comment, initial)
def mark_as_spam(self):
emit_event('before-comment-mark-spam', self.comment)
self.comment.status = COMMENT_BLOCKED_SPAM
self.comment.blocked_msg = _("Comment reported as spam by %s" %
get_request().user.display_name)
def mark_as_ham(self):
emit_event('before-comment-mark-ham', self.comment)
emit_event('before-comment-approved', self.comment)
self.comment.status = COMMENT_MODERATED
self.comment.blocked_msg = u''
class _CategoryBoundForm(forms.Form):
"""Internal baseclass for category bound forms."""
def __init__(self, category, initial=None):
self.app = get_application()
self.category = category
forms.Form.__init__(self, initial)
def as_widget(self):
widget = forms.Form.as_widget(self)
widget.category = self.category
widget.new = self.category is None
return widget
class EditCategoryForm(_CategoryBoundForm):
"""Form that is used to edit or create a category."""
slug = forms.TextField(lazy_gettext(u'Slug'), validators=[is_valid_slug()])
name = forms.TextField(lazy_gettext(u'Name'), max_length=50, required=True,
validators=[is_not_whitespace_only()])
description = forms.TextField(lazy_gettext(u'Description'),
max_length=5000, widget=forms.Textarea)
def __init__(self, category=None, initial=None):
if category is not None:
initial = forms.fill_dict(initial,
slug=category.slug,
name=category.name,
description=category.description
)
_CategoryBoundForm.__init__(self, category, initial)
def validate_slug(self, value):
"""Make sure the slug is unique."""
query = Category.query.filter_by(slug=value)
if self.category is not None:
query = query.filter(Category.id != self.category.id)
existing = query.first()
if existing is not None:
raise ValidationError(_('This slug is already in use'))
def make_category(self):
"""A helper function that creates a category object from the data."""
category = Category(self.data['name'], self.data['description'],
self.data['slug'] or None)
self.category = category
return category
def save_changes(self):
"""Save the changes back to the database. This also adds a redirect
if the slug changes.
"""
old_slug = self.category.slug
forms.set_fields(self.category, self.data, 'name', 'description')
if self.data['slug']:
self.category.slug = self.data['slug']
elif not self.category.slug:
self.category.set_auto_slug()
if old_slug != self.category.slug:
register_redirect(old_slug, self.category.slug)
class DeleteCategoryForm(_CategoryBoundForm):
"""Used for deleting categories."""
def delete_category(self):
"""Delete the category from the database."""
#! plugins can use this to react to category deletes. They can't stop
#! the deleting of the category but they can delete information in
#! their own tables so that the database is consistent afterwards.
emit_event('before-category-deleted', self.category)
db.delete(self.category)
class CommentMassModerateForm(forms.Form):
"""This form is used for comment mass moderation."""
selected_comments = forms.MultiChoiceField(widget=forms.CheckboxGroup)
per_page = forms.ChoiceField(choices=[20, 40, 60, 80, 100],
label=lazy_gettext('Comments Per Page:'))
def __init__(self, comments, initial=None):
self.comments = comments
self.selected_comments.choices = [c.id for c in self.comments]
forms.Form.__init__(self, initial)
def as_widget(self):
widget = forms.Form.as_widget(self)
widget.comments = self.comments
return widget
def iter_selection(self):
selection = set(self.data['selected_comments'])
for comment in self.comments:
if comment.id in selection:
yield comment
def delete_selection(self):
for comment in self.iter_selection():
delete_comment(comment)
def approve_selection(self, comment=None):
if comment:
emit_event('before-comment-approved', comment)
comment.status = COMMENT_MODERATED
comment.blocked_msg = u''
else:
for comment in self.iter_selection():
emit_event('before-comment-approved', comment)
comment.status = COMMENT_MODERATED
comment.blocked_msg = u''
def block_selection(self):
for comment in self.iter_selection():
emit_event('before-comment-blocked', comment)
comment.status = COMMENT_BLOCKED_USER
comment.blocked_msg = _("Comment blocked by %s" %
get_request().user.display_name)
def mark_selection_as_spam(self):
for comment in self.iter_selection():
emit_event('before-comment-mark-spam', comment)
comment.status = COMMENT_BLOCKED_SPAM
comment.blocked_msg = _("Comment marked as spam by %s" %
get_request().user.display_name)
def mark_selection_as_ham(self):
for comment in self.iter_selection():
emit_event('before-comment-mark-ham', comment)
self.approve_selection(comment)
class _GroupBoundForm(forms.Form):
"""Internal baseclass for group bound forms."""
def __init__(self, group, initial=None):
forms.Form.__init__(self, initial)
self.app = get_application()
self.group = group
def as_widget(self):
widget = forms.Form.as_widget(self)
widget.group = self.group
widget.new = self.group is None
return widget
class EditGroupForm(_GroupBoundForm):
"""Edit or create a group."""
groupname = forms.TextField(lazy_gettext(u'Groupname'), max_length=30,
validators=[is_not_whitespace_only()],
required=True)
privileges = forms.MultiChoiceField(lazy_gettext(u'Privileges'),
widget=forms.CheckboxGroup)
def __init__(self, group=None, initial=None):
if group is not None:
initial = forms.fill_dict(initial,
groupname=group.name,
privileges=[x.name for x in group.privileges]
)
_GroupBoundForm.__init__(self, group, initial)
self.privileges.choices = self.app.list_privileges()
def validate_groupname(self, value):
query = Group.query.filter_by(name=value)
if self.group is not None:
query = query.filter(Group.id != self.group.id)
if query.first() is not None:
raise ValidationError(_('This groupname is already in use'))
def _set_common_attributes(self, group):
forms.set_fields(group, self.data)
bind_privileges(group.privileges, self.data['privileges'])
def make_group(self):
"""A helper function that creates a new group object."""
group = Group(self.data['groupname'])
self._set_common_attributes(group)
self.group = group
return group
def save_changes(self):
"""Apply the changes."""
self.group.name = self.data['groupname']
self._set_common_attributes(self.group)
class DeleteGroupForm(_GroupBoundForm):
"""Used to delete a group from the admin panel."""
action = forms.ChoiceField(lazy_gettext(u'What should Zine do with users '
u'assigned to this group?'),
choices=[
('delete_membership', lazy_gettext(u'Do nothing, just detach the membership')),
('relocate', lazy_gettext(u'Move the users to another group'))
], widget=forms.RadioButtonGroup)
relocate_to = forms.ModelField(Group, 'id', lazy_gettext(u'Relocate users to'),
widget=forms.SelectBox)
def __init__(self, group, initial=None):
self.relocate_to.choices = [('', u'')] + [
(g.id, g.name) for g in Group.query.filter(Group.id != group.id)
]
_GroupBoundForm.__init__(self, group, forms.fill_dict(initial,
action='delete_membership'))
def context_validate(self, data):
if data['action'] == 'relocate' and not data['relocate_to']:
raise ValidationError(_('You have to select a group that '
'gets the users assigned.'))
def delete_group(self):
"""Deletes a group."""
if self.data['action'] == 'relocate':
            new_group = Group.query.filter_by(id=self.data['relocate_to'].id).first()
for user in self.group.users:
if not new_group in user.groups:
user.groups.append(new_group)
db.commit()
#! plugins can use this to react to user deletes. They can't stop
#! the deleting of the group but they can delete information in
#! their own tables so that the database is consistent afterwards.
#! Additional to the group object the form data is submitted.
emit_event('before-group-deleted', self.group, self.data)
db.delete(self.group)
class _UserBoundForm(forms.Form):
"""Internal baseclass for user bound forms."""
def __init__(self, user, initial=None):
forms.Form.__init__(self, initial)
self.app = get_application()
self.user = user
def as_widget(self):
widget = forms.Form.as_widget(self)
widget.user = self.user
widget.new = self.user is None
return widget
class EditUserForm(_UserBoundForm):
"""Edit or create a user."""
username = forms.TextField(lazy_gettext(u'Username'), max_length=30,
validators=[is_not_whitespace_only()],
required=True)
real_name = forms.TextField(lazy_gettext(u'Realname'), max_length=180)
display_name = forms.ChoiceField(lazy_gettext(u'Display name'))
description = forms.TextField(lazy_gettext(u'Description'),
max_length=5000, widget=forms.Textarea)
email = forms.TextField(lazy_gettext(u'Email'), required=True,
validators=[is_valid_email()])
www = forms.TextField(lazy_gettext(u'Website'),
validators=[is_valid_url()])
password = forms.TextField(lazy_gettext(u'Password'),
widget=forms.PasswordInput)
privileges = forms.MultiChoiceField(lazy_gettext(u'Privileges'),
widget=forms.CheckboxGroup)
groups = forms.MultiChoiceField(lazy_gettext(u'Groups'),
widget=forms.CheckboxGroup)
is_author = forms.BooleanField(lazy_gettext(u'List as author'),
help_text=lazy_gettext(u'This user is listed as author'))
def __init__(self, user=None, initial=None):
if user is not None:
initial = forms.fill_dict(initial,
username=user.username,
real_name=user.real_name,
display_name=user._display_name,
description=user.description,
email=user.email,
www=user.www,
privileges=[x.name for x in user.own_privileges],
groups=[g.name for g in user.groups],
is_author=user.is_author
)
_UserBoundForm.__init__(self, user, initial)
self.display_name.choices = [
(u'$username', user and user.username or _('Username')),
(u'$real_name', user and user.real_name or _('Realname'))
]
self.privileges.choices = self.app.list_privileges()
self.groups.choices = [g.name for g in Group.query.all()]
self.password.required = user is None
def validate_username(self, value):
query = User.query.filter_by(username=value)
if self.user is not None:
query = query.filter(User.id != self.user.id)
if query.first() is not None:
raise ValidationError(_('This username is already in use'))
def _set_common_attributes(self, user):
forms.set_fields(user, self.data, 'www', 'real_name', 'description',
'display_name', 'is_author')
bind_privileges(user.own_privileges, self.data['privileges'], user)
bound_groups = set(g.name for g in user.groups)
choosen_groups = set(self.data['groups'])
group_mapping = dict((g.name, g) for g in Group.query.all())
# delete groups
for group in (bound_groups - choosen_groups):
user.groups.remove(group_mapping[group])
# and add new groups
for group in (choosen_groups - bound_groups):
user.groups.append(group_mapping[group])
def make_user(self):
"""A helper function that creates a new user object."""
user = User(self.data['username'], self.data['password'],
self.data['email'])
self._set_common_attributes(user)
self.user = user
return user
def save_changes(self):
"""Apply the changes."""
self.user.username = self.data['username']
if self.data['password']:
self.user.set_password(self.data['password'])
self.user.email = self.data['email']
self._set_common_attributes(self.user)
class DeleteUserForm(_UserBoundForm):
"""Used to delete a user from the admin panel."""
action = forms.ChoiceField(lazy_gettext(u'What should Zine do with posts '
u'written by this user?'), choices=[
('delete', lazy_gettext(u'Delete them permanently')),
('reassign', lazy_gettext(u'Reassign posts'))
], widget=forms.RadioButtonGroup)
reassign_to = forms.ModelField(User, 'id',
lazy_gettext(u'Reassign posts to'),
widget=forms.SelectBox)
def __init__(self, user, initial=None):
self.reassign_to.choices = [('', u'')] + [
(u.id, u.username)
for u in User.query.filter(User.id != user.id)
]
_UserBoundForm.__init__(self, user, forms.fill_dict(initial,
action='reassign'
))
def context_validate(self, data):
        if self.user.posts.count() == 0:
data['action'] = None
if data['action'] == 'reassign' and not data['reassign_to']:
raise ValidationError(_('You have to select a user to reassign '
'the posts to.'))
def delete_user(self):
"""Deletes the user."""
if self.data['action'] == 'reassign':
db.execute(posts.update(posts.c.author_id == self.user.id), dict(
author_id=self.data['reassign_to'].id
))
# find all the comments by this author and make them comments that
# are no longer linked to the author.
for comment in self.user.comments.all():
comment.unbind_user()
#! plugins can use this to react to user deletes. They can't stop
#! the deleting of the user but they can delete information in
#! their own tables so that the database is consistent afterwards.
#! Additional to the user object the form data is submitted.
emit_event('before-user-deleted', self.user, self.data)
db.delete(self.user)
class EditProfileForm(_UserBoundForm):
"""Edit or create a user's profile."""
username = forms.TextField(lazy_gettext(u'Username'), max_length=30,
validators=[is_not_whitespace_only()],
required=True)
real_name = forms.TextField(lazy_gettext(u'Realname'), max_length=180)
display_name = forms.ChoiceField(lazy_gettext(u'Display name'))
description = forms.TextField(lazy_gettext(u'Description'),
max_length=5000, widget=forms.Textarea)
email = forms.TextField(lazy_gettext(u'Email'), required=True,
validators=[is_valid_email()])
www = forms.TextField(lazy_gettext(u'Website'),
validators=[is_valid_url()])
password = forms.TextField(lazy_gettext(u'Password'),
widget=forms.PasswordInput)
password_confirm = forms.TextField(lazy_gettext(u'Confirm password'),
widget=forms.PasswordInput,
help_text=lazy_gettext(u'Confirm password'))
def __init__(self, user=None, initial=None):
if user is not None:
initial = forms.fill_dict(initial,
username=user.username,
real_name=user.real_name,
display_name=user._display_name,
description=user.description,
email=user.email,
www=user.www
)
_UserBoundForm.__init__(self, user, initial)
self.display_name.choices = [
(u'$username', user and user.username or _('Username')),
(u'$real_name', user and user.real_name or _('Realname'))
]
def validate_email(self, value):
query = User.query.filter_by(email=value)
if self.user is not None:
query = query.filter(User.id != self.user.id)
if query.first() is not None:
raise ValidationError(_('This email address is already in use'))
def validate_password(self, value):
if 'password_confirm' in self.data:
password_confirm = self.data['password_confirm']
else:
password_confirm = self.request.values.get('password_confirm', '')
if ((not value == password_confirm) or (value and not password_confirm)
or (password_confirm and not value)):
raise ValidationError(_('Passwords do not match'))
def save_changes(self):
"""Apply the changes."""
if self.data['password']:
self.user.set_password(self.data['password'])
self.user.real_name = self.data['real_name']
self.user.display_name = self.data['display_name']
self.user.description = self.data['description']
self.user.email = self.data['email']
self.user.www = self.data['www']
class DeleteAccountForm(_UserBoundForm):
"""Used for a user to delete a his own account."""
password = forms.TextField(
lazy_gettext(u"Your password is required to delete your account:"),
required=True, widget=forms.PasswordInput,
messages = dict(required=lazy_gettext(u'Your password is required!'))
)
def __init__(self, user, initial=None):
_UserBoundForm.__init__(self, user, forms.fill_dict(initial,
action='delete'
))
def validate_password(self, value):
if not self.user.check_password(value):
raise ValidationError(_(u'Invalid password'))
def delete_user(self):
"""Deletes the user's account."""
# find all the comments by this author and make them comments that
# are no longer linked to the author.
for comment in self.user.comments.all():
comment.unbind_user()
#! plugins can use this to react to user deletes. They can't stop
#! the deleting of the user but they can delete information in
#! their own tables so that the database is consistent afterwards.
#! Additional to the user object the form data is submitted.
emit_event('before-user-deleted', self.user, self.data)
db.delete(self.user)
class _ConfigForm(forms.Form):
"""Internal baseclass for forms that operate on config values."""
def __init__(self, initial=None):
self.app = get_application()
if initial is None:
initial = {}
for name in self.fields:
initial[name] = self.app.cfg[name]
forms.Form.__init__(self, initial)
def _apply(self, t, skip):
for key, value in self.data.iteritems():
if key not in skip:
t[key] = value
def apply(self):
t = self.app.cfg.edit()
self._apply(t, set())
t.commit()
class LogOptionsForm(_ConfigForm):
"""A form for the logfiles."""
log_file = config_field('log_file', lazy_gettext(u'Filename'))
log_level = config_field('log_level', lazy_gettext(u'Log Level'))
class BasicOptionsForm(_ConfigForm):
"""The form where the basic options are changed."""
blog_title = config_field('blog_title', lazy_gettext(u'Blog title'))
blog_tagline = config_field('blog_tagline', lazy_gettext(u'Blog tagline'))
blog_email = config_field('blog_email', lazy_gettext(u'Blog email'))
language = config_field('language', lazy_gettext(u'Language'))
timezone = config_field('timezone', lazy_gettext(u'Timezone'))
session_cookie_name = config_field('session_cookie_name',
lazy_gettext(u'Cookie Name'))
comments_enabled = config_field('comments_enabled',
label=lazy_gettext(u'Comments enabled'),
help_text=lazy_gettext(u'enable comments per default'))
moderate_comments = config_field('moderate_comments',
lazy_gettext(u'Comment Moderation'),
widget=forms.RadioButtonGroup)
comments_open_for = config_field('comments_open_for',
label=lazy_gettext(u'Comments Open Period'))
pings_enabled = config_field('pings_enabled',
lazy_gettext(u'Pingbacks enabled'),
help_text=lazy_gettext(u'enable pingbacks per default'))
use_flat_comments = config_field('use_flat_comments',
lazy_gettext(u'Use flat comments'),
help_text=lazy_gettext(u'All comments are posted top-level'))
default_parser = config_field('default_parser',
lazy_gettext(u'Default parser'))
comment_parser = config_field('comment_parser',
lazy_gettext(u'Comment parser'))
posts_per_page = config_field('posts_per_page',
lazy_gettext(u'Posts per page'))
def __init__(self, initial=None):
_ConfigForm.__init__(self, initial)
self.language.choices = list_languages()
self.default_parser.choices = self.comment_parser.choices = \
self.app.list_parsers()
class URLOptionsForm(_ConfigForm):
"""The form for url changes. This form sends database queries, even
    though it seems to only operate on the config. Make sure to commit.
"""
blog_url_prefix = config_field('blog_url_prefix',
lazy_gettext(u'Blog URL prefix'))
admin_url_prefix = config_field('admin_url_prefix',
lazy_gettext(u'Admin URL prefix'))
category_url_prefix = config_field('category_url_prefix',
lazy_gettext(u'Category URL prefix'))
tags_url_prefix = config_field('tags_url_prefix',
lazy_gettext(u'Tag URL prefix'))
profiles_url_prefix = config_field('profiles_url_prefix',
lazy_gettext(u'Author Profiles URL prefix'))
post_url_format = config_field('post_url_format',
lazy_gettext(u'Post permalink URL format'))
ascii_slugs = config_field('ascii_slugs',
lazy_gettext(u'Limit slugs to ASCII'))
fixed_url_date_digits = config_field('fixed_url_date_digits',
lazy_gettext(u'Use zero-padded dates'))
force_https = config_field('force_https', lazy_gettext(u'Force HTTPS'))
def _apply(self, t, skip):
for key, value in self.data.iteritems():
if key not in skip:
old = t[key]
if old != value:
if key == 'blog_url_prefix':
change_url_prefix(old, value)
t[key] = value
# update the blog_url based on the force_https flag.
blog_url = (t['force_https'] and 'https' or 'http') + \
':' + t['blog_url'].split(':', 1)[1]
if blog_url != t['blog_url']:
t['blog_url'] = blog_url
class ThemeOptionsForm(_ConfigForm):
"""
The form for theme changes. This is mainly just a dummy,
to get csrf protection working.
"""
class CacheOptionsForm(_ConfigForm):
cache_system = config_field('cache_system', lazy_gettext(u'Cache system'))
cache_timeout = config_field('cache_timeout',
lazy_gettext(u'Default cache timeout'))
enable_eager_caching = config_field('enable_eager_caching',
lazy_gettext(u'Enable eager caching'),
help_text=lazy_gettext(u'Enable'))
memcached_servers = config_field('memcached_servers')
filesystem_cache_path = config_field('filesystem_cache_path')
def context_validate(self, data):
if data['cache_system'] == 'memcached':
if not data['memcached_servers']:
raise ValidationError(_(u'You have to provide at least one '
u'server to use memcached.'))
elif data['cache_system'] == 'filesystem':
if not data['filesystem_cache_path']:
raise ValidationError(_(u'You have to provide cache folder to '
u'use filesystem cache.'))
class MaintenanceModeForm(forms.Form):
"""yet a dummy form, but could be extended later."""
class WordPressImportForm(forms.Form):
"""This form is used in the WordPress importer."""
download_url = forms.TextField(lazy_gettext(u'Dump Download URL'),
validators=[is_valid_url()])
class FeedImportForm(forms.Form):
"""This form is used in the feed importer."""
download_url = forms.TextField(lazy_gettext(u'Feed Download URL'),
validators=[is_valid_url()])
class DeleteImportForm(forms.Form):
"""This form is used to delete a imported file."""
class ExportForm(forms.Form):
"""This form is used to implement the export dialog."""
def delete_comment(comment):
"""
Deletes or marks for deletion the specified comment, depending on the
comment's position in the comment thread. Comments are not pruned from
the database until all their children are.
"""
if comment.children:
# We don't have to check if the children are also marked deleted or not
# because if they still exist, it means somewhere down the tree is a
# comment that is not deleted.
comment.status = COMMENT_DELETED
comment.text = u''
comment.user = None
comment._author = comment._email = comment._www = None
else:
parent = comment.parent
#! plugins can use this to react to comment deletes. They can't
#! stop the deleting of the comment but they can delete information
#! in their own tables so that the database is consistent
#! afterwards.
emit_event('before-comment-deleted', comment)
db.delete(comment)
while parent is not None and parent.is_deleted:
if not parent.children:
newparent = parent.parent
emit_event('before-comment-deleted', parent)
db.delete(parent)
parent = newparent
else:
parent = None
# XXX: one could probably optimize this by tracking the amount
# of deleted comments
comment.post.sync_comment_count()
def make_config_form():
"""Returns the form for the configuration editor."""
app = get_application()
fields = {}
values = {}
use_default_label = lazy_gettext(u'Use default value')
for category in app.cfg.get_detail_list():
items = {}
values[category['name']] = category_values = {}
for item in category['items']:
items[item['name']] = forms.Mapping(
value=item['field'],
use_default=forms.BooleanField(use_default_label)
)
category_values[item['name']] = {
'value': item['value'],
'use_default': False
}
fields[category['name']] = forms.Mapping(**items)
class _ConfigForm(forms.Form):
values = forms.Mapping(**fields)
cfg = app.cfg
def apply(self):
t = self.cfg.edit()
for category, items in self.data['values'].iteritems():
for key, d in items.iteritems():
if category != 'zine':
key = '%s/%s' % (category, key)
if d['use_default']:
t.revert_to_default(key)
else:
t[key] = d['value']
t.commit()
return _ConfigForm({'values': values})
def make_notification_form(user):
"""Creates a notification form."""
app = get_application()
fields = {}
subscriptions = {}
systems = [(s.key, s.name) for s in
sorted(app.notification_manager.systems.values(),
key=lambda x: x.name.lower())]
for obj in app.notification_manager.types(user):
fields[obj.name] = forms.MultiChoiceField(choices=systems,
label=obj.description,
widget=forms.CheckboxGroup)
for ns in user.notification_subscriptions:
subscriptions.setdefault(ns.notification_id, []) \
.append(ns.notification_system)
class _NotificationForm(forms.Form):
subscriptions = forms.Mapping(**fields)
system_choices = systems
def apply(self):
user_subscriptions = {}
for subscription in user.notification_subscriptions:
user_subscriptions.setdefault(subscription.notification_id,
set()).add(subscription.notification_system)
for key, active in self['subscriptions'].iteritems():
currently_set = user_subscriptions.get(key, set())
active = set(active)
# remove outdated
for system in currently_set.difference(active):
for subscription in user.notification_subscriptions \
.filter_by(notification_id=key,
notification_system=system):
db.session.delete(subscription)
# add new
for system in active.difference(currently_set):
user.notification_subscriptions.append(
NotificationSubscription(user=user, notification_id=key,
notification_system=system))
return _NotificationForm({'subscriptions': subscriptions})
def make_import_form(blog):
user_choices = [('__zine_create_user', _(u'Create new user'))] + [
(user.id, user.username)
for user in User.query.order_by('username').all()
]
_authors = dict((author.id, forms.ChoiceField(author.username,
choices=user_choices))
for author in blog.authors)
_posts = dict((post.id, forms.BooleanField(help_text=post.title)) for post
in blog.posts)
_comments = dict((post.id, forms.BooleanField()) for post
in blog.posts)
class _ImportForm(forms.Form):
title = forms.BooleanField(lazy_gettext(u'Blog title'),
help_text=blog.title)
description = forms.BooleanField(lazy_gettext(u'Blog description'),
help_text=blog.description)
authors = forms.Mapping(_authors)
posts = forms.Mapping(_posts)
comments = forms.Mapping(_comments)
load_config = forms.BooleanField(lazy_gettext(u'Load config values'),
help_text=lazy_gettext(
u'Load the configuration values '
u'from the import.'))
def perform_import(self):
from zine.importers import perform_import
return perform_import(get_application(), blog, self.data,
stream=True)
_all_true = dict((x.id, True) for x in blog.posts)
return _ImportForm({'posts': _all_true.copy(),
'comments': _all_true.copy()})
| mitsuhiko/zine | zine/forms.py | Python | bsd-3-clause | 55,129 |
from falcor import *
def render_graph_BSDFViewer():
g = RenderGraph("BSDFViewerGraph")
loadRenderPassLibrary("AccumulatePass.dll")
loadRenderPassLibrary("BSDFViewer.dll")
BSDFViewer = createPass("BSDFViewer")
g.addPass(BSDFViewer, "BSDFViewer")
AccumulatePass = createPass("AccumulatePass")
g.addPass(AccumulatePass, "AccumulatePass")
g.addEdge("BSDFViewer.output", "AccumulatePass.input")
g.markOutput("AccumulatePass.output")
return g
BSDFViewer = render_graph_BSDFViewer()
try: m.addGraph(BSDFViewer)
except NameError: None
| NVIDIAGameWorks/Falcor | Tests/image_tests/renderpasses/graphs/BSDFViewer.py | Python | bsd-3-clause | 569 |
from django.conf.urls.defaults import *
from django_de.apps.authors.models import Author
urlpatterns = patterns('django.views.generic.list_detail',
(r'^$', 'object_list',
dict(
queryset = Author.objects.order_by('name', 'slug'),
template_object_name = 'author',
allow_empty=True,
),
)
)
| django-de/django-de-v2 | django_de/apps/authors/urls.py | Python | bsd-3-clause | 348 |
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from djangoautoconf.local_key_manager import get_default_admin_username, \
get_default_admin_password
from djangoautoconf.management.commands.web_manage_tools.user_creator import create_admin
def create_default_admin():
super_username = get_default_admin_username()
super_password = get_default_admin_password()
if not User.objects.filter(username=super_username).exists():
create_admin(super_username, super_password, "[email protected]")
print("default admin created")
else:
print("default admin already created")
class Command(BaseCommand):
args = ''
    help = 'Create the default admin user if it does not already exist'
def handle(self, *args, **options):
create_default_admin() | weijia/djangoautoconf | djangoautoconf/management/commands/create_default_super_user.py | Python | bsd-3-clause | 844 |
"""
Parsing resource files.
See base.py for the ParsedResource base class.
"""
import os.path
from pontoon.sync.formats import (
compare_locales,
ftl,
json_extensions,
lang,
po,
silme,
xliff,
)
# To add support for a new resource format, add an entry to this dict
# where the key is the extension you're parsing and the value is a
# callable returning an instance of a ParsedResource subclass.
SUPPORTED_FORMAT_PARSERS = {
".dtd": silme.parse_dtd,
".ftl": ftl.parse,
".inc": silme.parse_inc,
".ini": silme.parse_ini,
".json": json_extensions.parse,
".lang": lang.parse,
".po": po.parse,
".pot": po.parse,
".properties": silme.parse_properties,
".xlf": xliff.parse,
".xliff": xliff.parse,
".xml": compare_locales.parse,
}
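# Illustrative sketch (not part of this module): adding a hypothetical ".yaml"
# format would follow the pattern described above -- a callable with the same
# signature as the existing parsers, registered under its extension:
#
#     def parse_yaml(path, source_path=None, locale=None):
#         """Hypothetical parser; must return a ParsedResource subclass."""
#         ...
#
#     SUPPORTED_FORMAT_PARSERS[".yaml"] = parse_yaml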
def are_compatible_formats(extension_a, extension_b):
"""
Return True if given file extensions belong to the same file format.
    We test that by comparing parsers used by each file extension.
Note that some formats (e.g. Gettext, XLIFF) use multiple file extensions.
"""
try:
return (
SUPPORTED_FORMAT_PARSERS[extension_a]
== SUPPORTED_FORMAT_PARSERS[extension_b]
)
# File extension not supported
except KeyError:
return False
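# For example, given the registry above, the two Gettext extensions are
# compatible with each other while differently-parsed or unsupported
# extensions are not:
#
#     are_compatible_formats(".po", ".pot")   # True  (both use po.parse)
#     are_compatible_formats(".po", ".ftl")   # False (different parsers)
#     are_compatible_formats(".po", ".txt")   # False (.txt is unsupported)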
def parse(path, source_path=None, locale=None):
"""
Parse the resource file at the given path and return a
ParsedResource with its translations.
:param path:
Path to the resource file to parse.
:param source_path:
Path to the corresponding resource file in the source directory
for the resource we're parsing. Asymmetric formats need this
for saving. Defaults to None.
:param locale:
Object which describes information about currently processed locale.
Some of the formats require information about things like e.g. plural form.
"""
root, extension = os.path.splitext(path)
if extension in SUPPORTED_FORMAT_PARSERS:
return SUPPORTED_FORMAT_PARSERS[extension](
path, source_path=source_path, locale=locale
)
else:
raise ValueError("Translation format {0} is not supported.".format(extension))
| jotes/pontoon | pontoon/sync/formats/__init__.py | Python | bsd-3-clause | 2,225 |
import astar
import os
import sys
import csv
import math
from difflib import SequenceMatcher
import unittest
class Station:
def __init__(self, id, name, position):
self.id = id
self.name = name
self.position = position
self.links = []
def build_data():
"""builds the 'map' by reading the data files"""
stations = {}
rootdir = os.path.dirname(__file__)
r = csv.reader(open(os.path.join(rootdir, 'underground_stations.csv')))
next(r) # jump the first line
for record in r:
id = int(record[0])
lat = float(record[1])
lon = float(record[2])
name = record[3]
stations[id] = Station(id, name, (lat, lon))
r = csv.reader(open(os.path.join(rootdir, 'underground_routes.csv')))
next(r) # jump the first line
for id1, id2, lineNumber in r:
id1 = int(id1)
id2 = int(id2)
stations[id1].links.append(stations[id2])
stations[id2].links.append(stations[id1])
return stations
STATIONS = build_data()
def get_station_by_name(name):
"""lookup by name, the name does not have to be exact."""
name = name.lower()
ratios = [(SequenceMatcher(None, name, v.name.lower()).ratio(), v)
for v in STATIONS.values()]
best = max(ratios, key=lambda a: a[0])
if best[0] > 0.7:
return best[1]
else:
return None
def get_path(s1, s2):
""" runs astar on the map"""
def distance(n1, n2):
"""computes the distance between two stations"""
latA, longA = n1.position
latB, longB = n2.position
            # convert degrees to radians!!
latA, latB, longA, longB = map(
lambda d: d * math.pi / 180, (latA, latB, longA, longB))
x = (longB - longA) * math.cos((latA + latB) / 2)
y = latB - latA
return math.hypot(x, y)
return astar.find_path(s1, s2, neighbors_fnct=lambda s: s.links, heuristic_cost_estimate_fnct=distance, distance_between_fnct=distance)
class LondonTests(unittest.TestCase):
def test_solve_underground(self):
for n1,n2 in [('Chesham', 'Wimbledon'), ('Uxbridge','Upminster'), ('Heathrow Terminal 4','Epping')]:
s1 = get_station_by_name(n1)
s2 = get_station_by_name(n2)
path = get_path(s1, s2)
self.assertTrue(not path is None)
if __name__ == '__main__':
if len(sys.argv) != 3:
print(
'Usage : {script} <station1> <station2>'.format(script=sys.argv[0]))
sys.exit(1)
station1 = get_station_by_name(sys.argv[1])
print('Station 1 : ' + station1.name)
station2 = get_station_by_name(sys.argv[2])
print('Station 2 : ' + station2.name)
print('-' * 80)
path = get_path(station1, station2)
if path:
for s in path:
print(s.name)
else:
raise Exception('path not found!')
| jrialland/python-astar | tests/london/test_london_underground.py | Python | bsd-3-clause | 2,878 |
# !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2018-10-11 17:51:43
# @Last modified by: Brian Cherinka
# @Last Modified time: 2018-11-29 17:23:15
from __future__ import print_function, division, absolute_import
import numpy as np
import astropy
import astropy.units as u
import marvin.tools
from marvin.tools.quantities.spectrum import Spectrum
from marvin.utils.general.general import get_drpall_table
from marvin.utils.plot.scatter import plot as scatplot
from marvin import log
from .base import VACMixIn, VACTarget
def choose_best_spectrum(par1, par2, conf_thresh=0.1):
'''choose optimal HI spectrum based on the following criteria:
(1) If both detected and unconfused, choose highest SNR
(2) If both detected and both confused, choose lower confusion prob.
(3) If both detected and one confused, choose non-confused
(4) If one non-confused detection and one non-detection, go with detection
    (5) If one confused detection and one non-detection, go with non-detection
    (6) If neither detected, choose lowest rms
par1 and par2 are dictionaries with the following parameters:
program - gbt or alfalfa
snr - integrated SNR
rms - rms noise level
conf_prob - confusion probability
conf_thresh = maximum confusion probability below which we classify
    the object as essentially unconfused. Defaults to 0.1 following
(Stark+21)
'''
programs = [par1['program'],par2['program']]
sel_high_snr = np.argmax([par1['snr'],par2['snr']])
sel_low_rms = np.argmin([par1['rms'],par2['rms']])
sel_low_conf = np.argmin([par1['conf_prob'],par2['conf_prob']])
#both detected
if (par1['snr'] > 0) & (par2['snr'] > 0):
if (par1['conf_prob'] <= conf_thresh) & (par2['conf_prob'] <= conf_thresh):
pick = sel_high_snr
elif (par1['conf_prob'] <= conf_thresh) & (par2['conf_prob'] > conf_thresh):
pick = 0
elif (par1['conf_prob'] > conf_thresh) & (par2['conf_prob'] <= conf_thresh):
pick = 1
elif (par1['conf_prob'] > conf_thresh) & (par2['conf_prob'] > conf_thresh):
pick = sel_low_conf
#both nondetected
elif (par1['snr'] <= 0) & (par2['snr'] <= 0):
pick = sel_low_rms
#one detected
elif (par1['snr'] > 0) & (par2['snr'] <= 0):
if par1['conf_prob'] < conf_thresh:
pick=0
else:
pick=1
elif (par1['snr'] <= 0) & (par2['snr'] > 0):
if par2['conf_prob'] < conf_thresh:
pick=1
else:
pick=0
return programs[pick]
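# Illustrative example (hypothetical values): two unconfused detections fall
# under criterion (1) above, so the higher-SNR spectrum wins:
#
#     par1 = {'program': 'gbt', 'snr': 12.0, 'rms': 1.2, 'conf_prob': 0.02}
#     par2 = {'program': 'alfalfa', 'snr': 8.0, 'rms': 2.0, 'conf_prob': 0.05}
#     choose_best_spectrum(par1, par2)  # -> 'gbt'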
class HIVAC(VACMixIn):
"""Provides access to the MaNGA-HI VAC.
VAC name: HI
URL: https://www.sdss.org/dr17/data_access/value-added-catalogs/?vac_id=hi-manga-data-release-1
Description: Returns HI summary data and spectra
Authors: David Stark and Karen Masters
"""
# Required parameters
name = 'HI'
description = 'Returns HI summary data and spectra'
version = {'MPL-7': 'v1_0_1', 'DR15': 'v1_0_1', 'DR16': 'v1_0_2', 'DR17': 'v2_0_1', 'MPL-11': 'v2_0_1'}
display_name = 'HI'
url = 'https://www.sdss.org/dr17/data_access/value-added-catalogs/?vac_id=hi-manga-data-release-1'
# optional Marvin Tools to attach your vac to
include = (marvin.tools.cube.Cube, marvin.tools.maps.Maps, marvin.tools.modelcube.ModelCube)
# optional methods to attach to your main VAC tool in ~marvin.tools.vacs.VACs
add_methods = ['plot_mass_fraction']
# Required method
def set_summary_file(self, release):
''' Sets the path to the HI summary file '''
# define the variables to build a unique path to your VAC file
self.path_params = {'ver': self.version[release], 'type': 'all', 'program': 'GBT16A_095'}
# get_path returns False if the files do not exist locally
self.summary_file = self.get_path("mangahisum", path_params=self.path_params)
def set_program(self,plateifu):
# download the vac from the SAS if it does not already exist locally
if not self.file_exists(self.summary_file):
self.summary_file = self.download_vac('mangahisum', path_params=self.path_params)
# Find all entries in summary file with this plate-ifu.
# Need the full summary file data.
        # Find best entry between GBT/ALFALFA based on depth and confusion.
# Then update self.path_params['program'] with alfalfa or gbt.
summary = HITarget(plateifu, vacfile=self.summary_file)._data
galinfo = summary[summary['plateifu'] == plateifu]
if len(galinfo) == 1 and galinfo['session']=='ALFALFA':
program = 'alfalfa'
elif len(galinfo) in [0, 1]:
# if no entry found or session is GBT, default program to gbt
program = 'gbt'
else:
par1 = {'program': 'gbt','snr': 0.,'rms': galinfo[0]['rms'], 'conf_prob': galinfo[0]['conf_prob']}
par2 = {'program': 'gbt','snr': 0.,'rms': galinfo[1]['rms'], 'conf_prob': galinfo[1]['conf_prob']}
if galinfo[0]['session']=='ALFALFA':
par1['program'] = 'alfalfa'
if galinfo[1]['session']=='ALFALFA':
par2['program'] = 'alfalfa'
if galinfo[0]['fhi'] > 0:
par1['snr'] = galinfo[0]['fhi']/galinfo[0]['efhi']
if galinfo[1]['fhi'] > 0:
par2['snr'] = galinfo[1]['fhi']/galinfo[1]['efhi']
program = choose_best_spectrum(par1,par2)
log.info('Using HI data from {0}'.format(program))
# get path to ancillary VAC file for target HI spectra
self.update_path_params({'program':program})
# Required method
def get_target(self, parent_object):
''' Accesses VAC data for a specific target from a Marvin Tool object '''
# get any parameters you need from the parent object
plateifu = parent_object.plateifu
self.update_path_params({'plateifu': plateifu})
if parent_object.release in ['DR17', 'MPL-11']:
self.set_program(plateifu)
specfile = self.get_path('mangahispectra', path_params=self.path_params)
# create container for more complex return data
hidata = HITarget(plateifu, vacfile=self.summary_file, specfile=specfile)
# get the spectral data for that row if it exists
if hidata._indata and not self.file_exists(specfile):
hidata._specfile = self.download_vac('mangahispectra', path_params=self.path_params)
return hidata
class HITarget(VACTarget):
''' A customized target class to also display HI spectra
This class handles data from both the HI summary file and the
individual spectral files. Row data from the summary file for the given target
is returned via the `data` property. Spectral data can be displayed via
    the `plot_spectrum` method.
Parameters:
targetid (str):
The plateifu or mangaid designation
vacfile (str):
The path of the VAC summary file
specfile (str):
The path to the HI spectra
Attributes:
data:
The target row data from the main VAC file
targetid (str):
The target identifier
'''
def __init__(self, targetid, vacfile, specfile=None):
super(HITarget, self).__init__(targetid, vacfile)
self._specfile = specfile
self._specdata = None
def plot_spectrum(self):
''' Plot the HI spectrum '''
if self._specfile:
if not self._specdata:
self._specdata = self._get_data(self._specfile)
vel = self._specdata['VHI'][0]
flux = self._specdata['FHI'][0]
spec = Spectrum(flux, unit=u.Jy, wavelength=vel,
wavelength_unit=u.km / u.s)
ax = spec.plot(
ylabel='HI\ Flux\ Density', xlabel='Velocity', title=self.targetid, ytrim='minmax'
)
return ax
return None
#
# Functions to become available on your VAC in marvin.tools.vacs.VACs
def plot_mass_fraction(vacdata_object):
''' Plot the HI mass fraction
Computes and plots the HI mass fraction using
the NSA elliptical Petrosian stellar mass from the
MaNGA DRPall file. Only plots data for subset of
targets in both the HI VAC and the DRPall file.
Parameters:
vacdata_object (object):
The `~.VACDataClass` instance of the HI VAC
Example:
>>> from marvin.tools.vacs import VACs
>>> v = VACs()
>>> hi = v.HI
>>> hi.plot_mass_fraction()
'''
drpall = get_drpall_table()
drpall.add_index('plateifu')
data = vacdata_object.data[1].data
subset = drpall.loc[data['plateifu']]
log_stmass = np.log10(subset['nsa_elpetro_mass'])
diff = data['logMHI'] - log_stmass
fig, axes = scatplot(
log_stmass,
diff,
with_hist=False,
ylim=[-5, 5],
xlabel=r'log $M_*$',
ylabel=r'log $M_{HI}/M_*$',
)
return axes[0]
| sdss/marvin | python/marvin/contrib/vacs/hi.py | Python | bsd-3-clause | 9,230 |
from djpcms import sites
if sites.settings.CMS_ORM == 'django':
from djpcms.core.cmsmodels._django import *
elif sites.settings.CMS_ORM == 'stdnet':
from djpcms.core.cmsmodels._stdnet import *
else:
raise NotImplementedError('Objecr Relational Mapper {0} not available for CMS models'.format(sites.settings.CMS_ORM)) | strogo/djpcms | djpcms/models.py | Python | bsd-3-clause | 354 |
import os
import tempfile
from rest_framework import status
from hs_core.hydroshare import resource
from .base import HSRESTTestCase
class TestPublicResourceFlagsEndpoint(HSRESTTestCase):
def setUp(self):
super(TestPublicResourceFlagsEndpoint, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.rtype = 'GenericResource'
self.title = 'My Test resource'
res = resource.create_resource(self.rtype,
self.user,
self.title)
metadata_dict = [
{'description': {'abstract': 'My test abstract'}},
{'subject': {'value': 'sub-1'}}
]
file_one = "test1.txt"
open(file_one, "w").close()
self.file_one = open(file_one, "r")
self.txt_file_path = os.path.join(self.tmp_dir, 'text.txt')
txt = open(self.txt_file_path, 'w')
txt.write("Hello World\n")
txt.close()
self.rtype = 'GenericResource'
self.title = 'My Test resource'
res_two = resource.create_resource(self.rtype,
self.user,
self.title,
files=(self.file_one,),
metadata=metadata_dict)
self.pid = res.short_id
self.pid_two = res_two.short_id
self.resources_to_delete.append(self.pid)
self.resources_to_delete.append(self.pid_two)
def test_set_resource_flag_make_public(self):
flag_url = "/hsapi/resource/%s/flag/" % self.pid
response = self.client.post(flag_url, {
"t": "make_public"
}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
flag_url = "/hsapi/resource/%s/flag/" % self.pid_two
response = self.client.post(flag_url, {
"t": "make_public"
}, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
def test_set_resource_flag_make_private(self):
flag_url = "/hsapi/resource/%s/flag/" % self.pid
response = self.client.post(flag_url, {
"t": "make_private"
}, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
def test_set_resource_flag_make_discoverable(self):
flag_url = "/hsapi/resource/%s/flag/" % self.pid_two
response = self.client.post(flag_url, {
"t": "make_discoverable"
}, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
def test_set_resource_flag_make_not_discoverable(self):
flag_url = "/hsapi/resource/%s/flag/" % self.pid
response = self.client.post(flag_url, {
"t": "make_not_discoverable"
}, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
def test_set_resource_flag_make_not_shareable(self):
flag_url = "/hsapi/resource/%s/flag/" % self.pid
response = self.client.post(flag_url, {
"t": "make_not_shareable"
}, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
def test_set_resource_flag_make_shareable(self):
flag_url = "/hsapi/resource/%s/flag/" % self.pid
response = self.client.post(flag_url, {
"t": "make_shareable"
}, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
| RENCI/xDCIShare | hs_core/tests/api/rest/test_resource_flags.py | Python | bsd-3-clause | 3,544 |
################################################################
# File: e621.py
# Title: MANGAdownloader's site scraper
# Author: ASL97/ASL <[email protected]>
# Version: 1
# Notes : DON'T EMAIL ME UNLESS YOU NEED TO
# TODO: *blank*
################################################################
import misc
# used in __main__; downloading by id is not implemented yet
id_supported = False
_type = ["1","10"]
def scrap_manga(link, chapter):
chapter[1] = {}
tmp = link.split("/")[-1]
if tmp.isdigit():
id_ = tmp
link = "http://e621.net/pool/show.json?id=%s"%(id_)
j = misc.download_json(link)
name = j["name"]
total = j["post_count"]
page_ = 1
page = 0
for d in j["posts"]:
chapter[1][page] = {"link": d['file_url'],
"name": d['file_url'].split("/")[-1]}
page += 1
while page < total:
page_ += 1
link = "http://e621.net/pool/show.json?id=%s&page=%d"%(id_,page_)
j = misc.download_json(link)
for d in j["posts"]:
chapter[1][page] = {"link": d['file_url'],
"name": d['file_url'].split("/")[-1]}
page += 1
return name
else:
misc.Exit("fail to get id")
| asl97/MANGAdownloader | scrapers/e621.py | Python | bsd-3-clause | 1,343 |
#!/usr/bin/env python
import setuptools
# Hack to prevent a spurious "TypeError: 'NoneType' object is not callable" in
# multiprocessing/util.py _exit_function when running python setup.py test
# (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
try:
import multiprocessing
assert multiprocessing
except ImportError:
pass
setuptools.setup(
name='orwell.agent',
version='0.0.1',
description='Agent connecting to the game server.',
author='',
author_email='',
packages=setuptools.find_packages(exclude="test"),
test_suite='nose.collector',
install_requires=['pyzmq', 'cliff'],
tests_require=['nose', 'coverage', 'mock'],
entry_points={
'console_scripts': [
'thought_police = orwell.agent.main:main',
]
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Topic :: Utilities',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6'],
python_requires='>=3.6.0',
)
| orwell-int/agent-server-game-python | setup.py | Python | bsd-3-clause | 1,239 |
# -*- coding: utf8 -*-
"""
.. module:: burpui.misc.backend.burp2
:platform: Unix
:synopsis: Burp-UI burp2 backend module.
.. moduleauthor:: Ziirish <[email protected]>
"""
import re
import os
import time
import json
from collections import OrderedDict
from .burp1 import Burp as Burp1
from .interface import BUIbackend
from .utils.burp2 import Monitor
from .utils.constant import BURP_REVERSE_COUNTERS, BURP_STATUS_FORMAT_V2
from ..parser.burp2 import Parser
from ...utils import human_readable as _hr, utc_to_local
from ...exceptions import BUIserverException
from ..._compat import to_unicode
# Some functions are the same as in Burp1 backend
class Burp(Burp1):
"""The :class:`burpui.misc.backend.burp2.Burp` class provides a consistent
backend for ``burp-2`` servers.
It extends the :class:`burpui.misc.backend.burp1.Burp` class because a few
functions can be reused. The rest is just overrided.
:param server: ``Burp-UI`` server instance in order to access logger
and/or some global settings
:type server: :class:`burpui.engines.server.BUIServer`
:param conf: Configuration to use
:type conf: :class:`burpui.config.BUIConfig`
"""
# backend version
_vers = 2
# cache to store the guessed OS
_os_cache = {}
def __init__(self, server=None, conf=None):
"""
:param server: ``Burp-UI`` server instance in order to access logger
and/or some global settings
:type server: :class:`burpui.engines.server.BUIServer`
:param conf: Configuration to use
:type conf: :class:`burpui.config.BUIConfig`
"""
BUIbackend.__init__(self, server, conf)
self.monitor = Monitor(self.burpbin, self.burpconfcli, self.app, self.timeout)
self.batch_list_supported = self.monitor.batch_list_supported
self.parser = Parser(self)
self.logger.info(f"burp binary: {self.burpbin}")
self.logger.info(f"strip binary: {self.stripbin}")
self.logger.info(f"burp conf cli: {self.burpconfcli}")
self.logger.info(f"burp conf srv: {self.burpconfsrv}")
self.logger.info(f"command timeout: {self.timeout}")
self.logger.info(f"tmpdir: {self.tmpdir}")
self.logger.info(f"zip64: {self.zip64}")
self.logger.info(f"includes: {self.includes}")
self.logger.info(f"enforce: {self.enforce}")
self.logger.info(f"revoke: {self.revoke}")
self.logger.info(f"client version: {self.client_version}")
self.logger.info(f"server version: {self.server_version}")
@property
def client_version(self):
return self.monitor.client_version
@property
def server_version(self):
return self.monitor.server_version
@staticmethod
def _human_st_mode(mode):
"""Convert the st_mode returned by stat in human readable (ls-like)
format
"""
hur = ""
if os.path.stat.S_ISREG(mode):
hur = "-"
elif os.path.stat.S_ISLNK(mode):
hur = "l"
elif os.path.stat.S_ISSOCK(mode):
hur = "s"
elif os.path.stat.S_ISDIR(mode):
hur = "d"
elif os.path.stat.S_ISBLK(mode):
hur = "b"
elif os.path.stat.S_ISFIFO(mode):
hur = "p"
elif os.path.stat.S_ISCHR(mode):
hur = "c"
else:
hur = "-"
for who in "USR", "GRP", "OTH":
for perm in "R", "W", "X":
if mode & getattr(os.path.stat, "S_I" + perm + who):
hur += perm.lower()
else:
hur += "-"
return hur
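    # For example (standard stat modes): _human_st_mode(0o100644) returns
    # "-rw-r--r--" and _human_st_mode(0o040755) returns "drwxr-xr-x".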
def statistics(self, agent=None):
"""See :func:`burpui.misc.backend.interface.BUIbackend.statistics`"""
return {
"alive": self.monitor.alive,
"server_version": self.server_version,
"client_version": self.client_version,
}
def status(self, query="c:\n", timeout=None, cache=True, agent=None):
"""See :func:`burpui.misc.backend.interface.BUIbackend.status`"""
return self.monitor.status(query, timeout, cache)
def _get_backup_logs(self, number, client, forward=False, deep=False):
"""See
:func:`burpui.misc.backend.interface.BUIbackend.get_backup_logs`
"""
ret = {}
ret2 = {}
if not client or not number:
return ret
query = self.status("c:{0}:b:{1}\n".format(client, number))
if not query:
return ret
try:
logs = query["clients"][0]["backups"][0]["logs"]["list"]
except KeyError:
self.logger.warning("No logs found")
return ret
if "backup_stats" in logs:
ret = self._parse_backup_stats(number, client, forward)
if "backup" in logs and deep:
ret2 = self._parse_backup_log(number, client)
ret.update(ret2)
ret["encrypted"] = False
if "files_enc" in ret and ret["files_enc"]["total"] > 0:
ret["encrypted"] = True
return ret
@staticmethod
def _do_parse_backup_log(data, client):
# tests ordered as the logs order
ret = OrderedDict()
ret["client_version"] = None
ret["protocol"] = 1
ret["is_windows"] = False
ret["server_version"] = None
if not data:
return ret
try:
log = data["clients"][0]["backups"][0]["logs"]["backup"]
except KeyError:
            # Assume protocol 1 in all cases unless protocol 2 is explicitly found
return ret
# pre-compile regex since they'll be called on every log line
regex = {
"client_version": re.compile(r"Client version: (\d+\.\d+\.\d+)$"),
"server_version": re.compile(
r"WARNING: Client '{}' version '\d+\.\d+\.\d+' does not match server version '(\d+\.\d+\.\d+)'. An upgrade is recommended.$".format(
client
)
),
"protocol": re.compile(r"Protocol: (\d)$"),
"is_windows": re.compile(r"Client is Windows$"),
}
expressions_list = list(ret.keys())
catching_expressions = ["client_version", "server_version", "protocol"]
casting_expressions = {
"protocol": int,
}
def __dummy(val):
return val
for line in log:
expressions = expressions_list
for expression in expressions:
if expression in catching_expressions:
catch = regex[expression].search(line)
if catch:
cast = casting_expressions.get(expression, __dummy)
ret[expression] = cast(catch.group(1))
# don't search this expression twice
expressions_list.remove(expression)
break
else:
if expression in regex and regex[expression].search(line):
ret[expression] = True
# don't search this expression twice
expressions_list.remove(expression)
break
return ret
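    # Illustrative sketch (hypothetical status payload): given
    #     data = {"clients": [{"backups": [{"logs": {"backup": [
    #         "Client version: 2.1.32", "Protocol: 2", "Client is Windows"]}}]}]}
    # _do_parse_backup_log(data, "client1") returns an OrderedDict with
    # client_version "2.1.32", protocol 2, is_windows True and server_version
    # left at None.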
def _parse_backup_log(self, number, client):
"""The :func:`burpui.misc.backend.burp2.Burp._parse_backup_log`
function helps you determine if the backup is protocol 2 or 1 and various
useful details.
:param number: Backup number to work on
:type number: int
:param client: Client name to work on
:type client: str
:returns: a dict with some useful details
"""
data = self.status("c:{0}:b:{1}:l:backup\n".format(client, number))
return self._do_parse_backup_log(data, client)
def _do_parse_backup_stats(
self, data, result, number, client, forward=False, agent=None
):
ret = {}
translate = {
"time_start": "start",
"time_end": "end",
"time_taken": "duration",
"bytes": "totsize",
"bytes_received": "received",
"bytes_estimated": "estimated_bytes",
"files": "files",
"files_encrypted": "files_enc",
"directories": "dir",
"soft_links": "softlink",
"hard_links": "hardlink",
"meta_data": "meta",
"meta_data_encrypted": "meta_enc",
"special_files": "special",
"efs_files": "efs",
"vss_headers": "vssheader",
"vss_headers_encrypted": "vssheader_enc",
"vss_footers": "vssfooter",
"vss_footers_encrypted": "vssfooter_enc",
"total": "total",
"grand_total": "total",
}
counts = {
"new": "count",
"changed": "changed",
"unchanged": "same",
"deleted": "deleted",
"total": "scanned",
"scanned": "scanned",
}
single = [
"time_start",
"time_end",
"time_taken",
"bytes_received",
"bytes_estimated",
"bytes",
]
if not data:
return ret
try:
back = data["clients"][0]["backups"][0]
except KeyError:
self.logger.warning("No backup found")
return ret
if "backup_stats" not in back["logs"]:
self.logger.warning("No stats found for backup")
return ret
stats = None
try:
stats = json.loads("".join(back["logs"]["backup_stats"]))
except:
stats = back["logs"]["backup_stats"]
if not stats:
return ret
# server was upgraded but backup comes from an older version
if "counters" not in stats:
return super(Burp, self)._parse_backup_stats(
number, client, forward, stats, agent
)
counters = stats["counters"]
for counter in counters:
name = counter["name"]
if name in translate:
name = translate[name]
if counter["name"] in single:
result[name] = counter["count"]
else:
result[name] = {}
for (key, val) in counts.items():
if val in counter:
result[name][key] = counter[val]
else:
result[name][key] = 0
if "start" in result and "end" in result:
result["duration"] = result["end"] - result["start"]
# convert utc timestamp to local
# example: 1468850307 -> 1468857507
result["start"] = utc_to_local(result["start"])
result["end"] = utc_to_local(result["end"])
# Needed for graphs
if "received" not in result:
result["received"] = 1
return result
def _parse_backup_stats(self, number, client, forward=False, agent=None):
"""The :func:`burpui.misc.backend.burp2.Burp._parse_backup_stats`
function is used to parse the burp logs.
:param number: Backup number to work on
:type number: int
:param client: Client name to work on
:type client: str
:param forward: Is the client name needed in later process
:type forward: bool
:param agent: What server to ask (only in multi-agent mode)
:type agent: str
:returns: Dict containing the backup log
"""
backup = {"os": self._guess_os(client), "number": int(number)}
if forward:
backup["name"] = client
query = self.status("c:{0}:b:{1}:l:backup_stats\n".format(client, number))
return self._do_parse_backup_stats(
query, backup, number, client, forward, agent
)
# TODO: support old clients
    # NOTE: this should now be partly done since we fall back to the Burp1 code
# def _parse_backup_log(self, fh, number, client=None, agent=None):
# """
# parse_backup_log parses the log.gz of a given backup and returns a
# dict containing different stats used to render the charts in the
# reporting view
# """
# return {}
# def get_clients_report(self, clients, agent=None):
def get_counters(self, name=None, agent=None):
"""See :func:`burpui.misc.backend.interface.BUIbackend.get_counters`"""
ret = {}
query = self.status("c:{0}\n".format(name), cache=False)
# check the status returned something
if not query:
return ret
try:
client = query["clients"][0]
except KeyError:
self.logger.warning("Client not found")
return ret
return self._do_get_counters(client)
def _do_get_counters(self, data):
ret = {}
client = data
# check the client is currently backing-up
if "run_status" not in client or client["run_status"] != "running":
return ret
backup = None
phases = ["working", "finishing"]
try:
for child in client["children"]:
if "action" in child and child["action"] == "backup":
backup = child
break
except KeyError:
for back in client["backups"]:
if "flags" in back and any(x in back["flags"] for x in phases):
backup = back
break
# check we found a working backup
if not backup:
return ret
# list of single counters (type CNTR_SINGLE_FIELD in cntr.c)
single = [
"bytes_estimated",
"bytes",
"bytes_received",
"bytes_sent",
"time_start",
"time_end",
"warnings",
"errors",
]
# translation table to be compatible with burp1
def translate(cntr):
translate_table = {"bytes_estimated": "estimated_bytes"}
try:
return translate_table[cntr]
except KeyError:
return cntr
for counter in backup.get("counters", {}):
name = translate(counter["name"])
if counter["name"] not in single:
                # Prior to burp-2.1.6 some counters are reversed
# See https://github.com/grke/burp/commit/adeb3ad68477303991a393fa7cd36bc94ff6b429
if self.server_version and self.server_version < BURP_REVERSE_COUNTERS:
ret[name] = [
counter["count"],
counter["same"], # reversed
counter["changed"], # reversed
counter["deleted"],
counter["scanned"],
]
else:
ret[name] = [
counter["count"],
counter["changed"],
counter["same"],
counter["deleted"],
counter["scanned"],
]
else:
ret[name] = counter["count"]
if "phase" in backup:
ret["phase"] = backup["phase"]
else:
for phase in phases:
if phase in backup.get("flags", []):
ret["phase"] = phase
break
if "bytes" not in ret:
ret["bytes"] = 0
if set(["time_start", "estimated_bytes", "bytes"]) <= set(ret.keys()):
try:
diff = time.time() - int(ret["time_start"])
byteswant = int(ret["estimated_bytes"])
bytesgot = int(ret["bytes"])
bytespersec = bytesgot / diff
bytesleft = byteswant - bytesgot
ret["speed"] = bytespersec
if bytespersec > 0:
timeleft = int(bytesleft / bytespersec)
ret["timeleft"] = timeleft
else:
ret["timeleft"] = -1
except:
ret["timeleft"] = -1
try:
ret["percent"] = round(
float(ret["bytes"]) / float(ret["estimated_bytes"]) * 100
)
except (ZeroDivisionError, KeyError):
# You know... division by 0
ret["percent"] = 0
return ret
def is_backup_running(self, name=None, agent=None):
"""See
:func:`burpui.misc.backend.interface.BUIbackend.is_backup_running`
"""
if not name:
return False
try:
query = self.status("c:{0}\n".format(name))
except BUIserverException:
return False
return self._do_is_backup_running(query)
def _do_is_backup_running(self, data):
if data:
try:
return data["clients"][0]["run_status"] in ["running"]
except KeyError:
pass
return False
def is_one_backup_running(self, agent=None):
"""See
:func:`burpui.misc.backend.interface.BUIbackend.is_one_backup_running`
"""
ret = []
try:
clients = self.get_all_clients(last_attempt=False)
except BUIserverException:
return ret
return self._do_is_one_backup_running(clients)
def _do_is_one_backup_running(self, data):
ret = []
for client in data:
if client["state"] in ["running"]:
ret.append(client["name"])
return ret
def _status_human_readable(self, status):
"""The label has changed in burp2, we override it to be compatible with
burp1's format
:param status: The status returned by the burp2 server
:type status: str
:returns: burp1 status compatible
"""
if not status:
return None
if status == "c crashed":
return "client crashed"
if status == "s crashed":
return "server crashed"
return status
def _get_last_backup(self, name, working=True):
"""Return the last backup of a given client
:param name: Name of the client
:type name: str
:param working: Also return uncomplete backups
:type working: bool
:returns: The last backup
"""
try:
clients = self.status("c:{}".format(name))
client = clients["clients"][0]
i = 0
while True:
ret = client["backups"][i]
if not working and "working" in ret["flags"]:
i += 1
continue
return ret
except (KeyError, TypeError, IndexError, BUIserverException):
return None
def _guess_os(self, name):
"""Return the OS of the given client based on the magic *os* label
:param name: Name of the client
:type name: str
:returns: The guessed OS of the client
::
grep label /etc/burp/clientconfdir/toto
label = os: Darwin OS
"""
ret = "Unknown"
if name in self._os_cache:
return self._os_cache[name]
labels = self.get_client_labels(name)
OSES = []
for label in labels:
if re.match("os:", label, re.IGNORECASE):
_os = label.split(":", 1)[1].strip()
if _os not in OSES:
OSES.append(_os)
if OSES:
ret = OSES[-1]
else:
# more aggressive check
last = self._get_last_backup(name, False)
if last:
try:
tree = self.get_tree(name, last["number"])
if tree[0]["name"] != "/":
ret = "Windows"
else:
ret = "Unix/Linux"
except (IndexError, KeyError, BUIserverException):
pass
self._os_cache[name] = ret
return ret
def get_all_clients(self, agent=None, last_attempt=True):
"""See
:func:`burpui.misc.backend.interface.BUIbackend.get_all_clients`
"""
ret = []
query = self.status()
if not query or "clients" not in query:
return ret
clients = query["clients"]
for client in clients:
cli = {}
cli["name"] = client["name"]
cli["state"] = self._status_human_readable(client["run_status"])
infos = client["backups"]
if cli["state"] in ["running"]:
cli["last"] = "now"
cli["last_attempt"] = "now"
elif not infos:
cli["last"] = "never"
cli["last_attempt"] = "never"
else:
convert = True
infos = infos[0]
if self.server_version and self.server_version < BURP_STATUS_FORMAT_V2:
cli["last"] = infos["timestamp"]
convert = False
# only do deep inspection when server >= BURP_STATUS_FORMAT_V2
elif self.deep_inspection:
logs = self.get_backup_logs(infos["number"], client["name"])
cli["last"] = logs["start"]
else:
cli["last"] = utc_to_local(infos["timestamp"])
if last_attempt:
last_backup = self._get_last_backup(client["name"])
if convert:
cli["last_attempt"] = utc_to_local(last_backup["timestamp"])
else:
cli["last_attempt"] = last_backup["timestamp"]
ret.append(cli)
return ret
def get_client_status(self, name=None, agent=None):
"""See :func:`burpui.misc.backend.interface.BUIbackend.get_client_status`"""
ret = {}
if not name:
return ret
query = self.status("c:{0}\n".format(name))
if not query:
return ret
try:
client = query["clients"][0]
except (KeyError, IndexError):
self.logger.warning("Client not found")
return ret
return self._do_get_client_status(client)
def _do_get_client_status(self, data):
ret = {}
client = data
ret["state"] = self._status_human_readable(client["run_status"])
infos = client["backups"]
if ret["state"] in ["running"]:
try:
ret["phase"] = client["phase"]
except KeyError:
for child in client.get("children", []):
if "action" in child and child["action"] == "backup":
ret["phase"] = child["phase"]
break
counters = self._do_get_counters(client)
if "percent" in counters:
ret["percent"] = counters["percent"]
else:
ret["percent"] = 0
ret["last"] = "now"
elif not infos:
ret["last"] = "never"
else:
infos = infos[0]
ret["last"] = infos["timestamp"]
return ret
def get_client(self, name=None, agent=None):
"""See :func:`burpui.misc.backend.interface.BUIbackend.get_client`"""
return self.get_client_filtered(name)
def get_client_filtered(
self, name=None, limit=-1, page=None, start=None, end=None, agent=None
):
"""See :func:`burpui.misc.backend.interface.BUIbackend.get_client_filtered`"""
ret = []
if not name:
return ret
query = self.status("c:{0}\n".format(name))
if not query:
return ret
try:
backups = query["clients"][0]["backups"]
except (KeyError, IndexError):
self.logger.warning("Client not found")
return ret
for idx, backup in enumerate(backups):
# skip the first elements if we are in a page
if page and page > 1 and limit > 0:
if idx < (page - 1) * limit:
continue
back = {}
# skip running backups since data will be inconsistent
if "flags" in backup and "working" in backup["flags"]:
continue
back["number"] = backup["number"]
if "flags" in backup and "deletable" in backup["flags"]:
back["deletable"] = True
else:
back["deletable"] = False
back["date"] = backup["timestamp"]
# skip backups before "start"
if start and backup["timestamp"] < start:
continue
# skip backups after "end"
if end and backup["timestamp"] > end:
continue
def __get_log(client, bkp, res):
log = self.get_backup_logs(bkp["number"], client)
try:
res["encrypted"] = log["encrypted"]
try:
res["received"] = log["received"]
except KeyError:
res["received"] = 0
try:
res["size"] = log["totsize"]
except KeyError:
res["size"] = 0
res["end"] = log["end"]
# override date since the timestamp is odd
res["date"] = log["start"]
except Exception:
self.logger.warning("Unable to parse logs")
return None
return res
with_log = __get_log(name, backup, back)
if with_log:
ret.append(with_log)
# stop after "limit" elements
if page and page > 1 and limit > 0:
if idx >= page * limit:
break
elif limit > 0 and idx >= limit:
break
# Here we need to reverse the array so the backups are sorted by num
# ASC
ret.reverse()
return ret
def is_backup_deletable(self, name=None, backup=None, agent=None):
"""Check if a given backup is deletable"""
if not name or not backup:
return False
query = self.status("c:{0}:b:{1}\n".format(name, backup))
if not query:
return False
return self._do_is_backup_deletable(query)
def _do_is_backup_deletable(self, data):
query = data
try:
flags = query["clients"][0]["backups"][0]["flags"]
return "deletable" in flags
except KeyError:
return False
def _format_tree(self, data, top, level):
ret = []
if not data:
return ret
try:
backup = data["clients"][0]["backups"][0]
except KeyError:
return ret
for entry in backup["browse"]["entries"]:
data = {}
base = None
dirn = None
if top == "*":
base = os.path.basename(entry["name"])
dirn = os.path.dirname(entry["name"])
if entry["name"] == ".":
continue
else:
data["name"] = base or entry["name"]
data["mode"] = self._human_st_mode(entry["mode"])
if re.match("^(d|l)", data["mode"]):
data["type"] = "d"
data["folder"] = True
else:
data["type"] = "f"
data["folder"] = False
data["inodes"] = entry["nlink"]
data["uid"] = entry["uid"]
data["gid"] = entry["gid"]
data["parent"] = dirn or top
data["size"] = "{0:.1eM}".format(_hr(entry["size"]))
data["date"] = entry["mtime"]
data["fullname"] = (
os.path.join(top, entry["name"]) if top != "*" else entry["name"]
)
data["level"] = level
data["children"] = []
ret.append(data)
return ret
def get_tree(self, name=None, backup=None, root=None, level=-1, agent=None):
"""See :func:`burpui.misc.backend.interface.BUIbackend.get_tree`"""
if not name or not backup:
return []
if not root:
top = ""
else:
top = to_unicode(root)
# we know this operation may take a while so we arbitrary increase the
# read timeout
timeout = None
if top == "*":
timeout = max(self.timeout, 300)
query = self.status("c:{0}:b:{1}:p:{2}\n".format(name, backup, top), timeout)
return self._format_tree(query, top, level)
def get_client_version(self, agent=None):
"""See
:func:`burpui.misc.backend.interface.BUIbackend.get_client_version`
"""
return self.client_version
def get_server_version(self, agent=None):
"""See
:func:`burpui.misc.backend.interface.BUIbackend.get_server_version`
"""
if not self.server_version:
self.status()
return self.server_version
def get_client_labels(self, client=None, agent=None):
"""See
:func:`burpui.misc.backend.interface.BUIbackend.get_client_labels`
"""
ret = []
if not client:
return ret
        # micro-optimization: since the status results are cached in memory for
        # a couple of seconds, using the same global query and iterating over it
        # is more efficient than filtering burp-side
query = self.status("c:\n")
if not query:
return ret
try:
for cli in query["clients"]:
if cli["name"] == client:
return cli["labels"]
except KeyError:
return ret
# Same as in Burp1 backend
# def restore_files(
# self,
# name=None,
# backup=None,
# files=None,
# strip=None,
# archive='zip',
# password=None,
# agent=None):
# def read_conf_cli(self, agent=None):
# def read_conf_srv(self, agent=None):
# def store_conf_cli(self, data, agent=None):
# def store_conf_srv(self, data, agent=None):
# def get_parser_attr(self, attr=None, agent=None):
| ziirish/burp-ui | burpui/misc/backend/burp2.py | Python | bsd-3-clause | 30,550 |
from enum import Enum
from typing import List, Any, cast
import yass
from tutorial.base_types_external import Integer
# shows how to use contract internal base types
class ExpirationHandler(yass.BaseTypeHandler):
def readBase(self, reader: yass.Reader) -> 'Expiration':
return Expiration(
reader.readZigZagInt()
)
def writeBase(self, value: 'Expiration', writer: yass.Writer) -> None:
writer.writeZigZagInt(value.year)
class Expiration:
TYPE_DESC = yass.TypeDesc(yass.FIRST_DESC_ID + 1, ExpirationHandler())
def __init__(self, year: int) -> None:
self.year = year
def __str__(self) -> str:
return f"{self.year}"
class PriceKind(Enum):
BID = 0
ASK = 1
class Price:
def __init__(self) -> None:
self.instrumentId: Integer = cast(Integer, None)
self.kind: PriceKind = cast(PriceKind, None)
self.value: Integer = cast(Integer, None)
@yass.abstract
class Instrument:
def __init__(self) -> None:
self.id: Integer = cast(Integer, None)
self.name: str = cast(str, None)
class SystemException(Exception):
def __init__(self) -> None:
self.details: str = cast(str, None)
@yass.abstract
class ApplicationException(Exception):
def __init__(self) -> None:
pass
class UnknownInstrumentsException(ApplicationException):
def __init__(self) -> None:
ApplicationException.__init__(self)
self.instrumentIds: List[Integer] = cast(List[Integer], None)
self.onlyNeededForTests1: Any = cast(Any, None)
self.onlyNeededForTests2: bytes = cast(bytes, None)
self.onlyNeededForTests3: Exception = cast(Exception, None)
class Node:
def __init__(self) -> None:
self.id: float = cast(float, None)
self.links: List[Node] = cast(List[Node], None)
self.next: Node = cast(Node, None)
class EchoService:
def echo(self, value: Any) -> Any:
raise NotImplementedError()
class PriceEngine:
def subscribe(self, instrumentIds: List[Integer]) -> None:
raise NotImplementedError()
class PriceListener:
def newPrices(self, prices: List[Price]) -> None:
raise NotImplementedError()
| softappeal/yass | py3/tutorial/generated/contract/__init__.py | Python | bsd-3-clause | 2,223 |
import re
from nexusmaker.tools import natsort
is_combined_cognate = re.compile(r"""(\d+)([a-z]+)""")
class CognateParser(object):
UNIQUE_IDENTIFIER = "u_"
def __init__(self, strict=True, uniques=True, sort=True):
"""
Parses cognates.
- strict (default=True): remove dubious cognates (?)
- uniques (default=True): non-cognate items get unique states
- sort (default=True): normalise ordering with natsort (i.e. 2,1 => 1,2)
"""
self.uniques = uniques
self.strict = strict
self.sort = sort
self.unique_id = 0
def is_unique_cognateset(self, cog, labelled=False):
if not labelled:
return str(cog).startswith(self.UNIQUE_IDENTIFIER)
else:
return "_%s" % self.UNIQUE_IDENTIFIER in str(cog)
def _split_combined_cognate(self, cognate):
m = is_combined_cognate.findall(cognate)
return [m[0][0], cognate] if m else [cognate]
def get_next_unique(self):
if not self.uniques:
return []
self.unique_id = self.unique_id + 1
return ["%s%d" % (self.UNIQUE_IDENTIFIER, self.unique_id)]
def parse_cognate(self, value):
raw = value
if value is None:
return self.get_next_unique()
elif value == '':
return self.get_next_unique()
elif str(value).lower() == 's': # error
return self.get_next_unique()
elif 'x' in str(value).lower(): # error
return self.get_next_unique()
elif isinstance(value, str):
if value.startswith(","):
raise ValueError("Possible broken combined cognate %r" % raw)
if value.endswith("-"):
raise ValueError("Possible broken combined cognate %r" % raw)
elif ';' in value:
raise ValueError("Possible broken combined cognate %r" % raw)
value = value.replace('.', ',').replace("/", ",")
# parse out subcognates
value = [
self._split_combined_cognate(v.strip()) for v in value.split(",")
]
value = [item for sublist in value for item in sublist]
if self.strict:
# remove dubious cognates
value = [v for v in value if '?' not in v]
# exit if all are dubious, setting to unique state
if len(value) == 0:
return self.get_next_unique()
else:
value = [v.replace("?", "") for v in value]
# remove any empty things in the list
value = [v for v in value if len(v) > 0]
if self.sort:
value = natsort(value)
return value
else:
raise ValueError("%s" % type(value))
| SimonGreenhill/NexusMaker | nexusmaker/CognateParser.py | Python | bsd-3-clause | 2,824 |
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for analyzer
"""
import json
import TestGyp
found = 'Found dependency'
found_all = 'Found dependency (all)'
not_found = 'No dependencies'
def _CreateConfigFile(files, additional_compile_targets, test_targets=[]):
"""Creates the analyzer config file, which is used as the input to analyzer.
  See analyzer.py for a description of the arguments."""
f = open('test_file', 'w')
to_write = {'files': files,
'test_targets': test_targets,
'additional_compile_targets': additional_compile_targets }
json.dump(to_write, f)
f.close()
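# For example, _CreateConfigFile(['foo.c'], ['all']) writes a JSON file like
# {"files": ["foo.c"], "test_targets": [], "additional_compile_targets": ["all"]}.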
def _CreateBogusConfigFile():
f = open('test_file','w')
f.write('bogus')
f.close()
def _ReadOutputFileContents():
f = open('analyzer_output', 'r')
result = json.load(f)
f.close()
return result
# NOTE: this would be clearer if it subclassed TestGypCustom, but that trips
# over a bug in pylint (E1002).
test = TestGyp.TestGypCustom(format='analyzer')
def CommonArgs():
return ('-Gconfig_path=test_file',
'-Ganalyzer_output_path=analyzer_output')
def run_analyzer(*args, **kw):
"""Runs the test specifying a particular config and output path."""
args += CommonArgs()
test.run_gyp('test.gyp', *args, **kw)
def run_analyzer2(*args, **kw):
"""Same as run_analyzer(), but passes in test2.gyp instead of test.gyp."""
args += CommonArgs()
test.run_gyp('test2.gyp', *args, **kw)
def run_analyzer3(*args, **kw):
"""Same as run_analyzer(), but passes in test3.gyp instead of test.gyp."""
args += CommonArgs()
test.run_gyp('test3.gyp', *args, **kw)
def run_analyzer4(*args, **kw):
"""Same as run_analyzer(), but passes in test3.gyp instead of test.gyp."""
args += CommonArgs()
test.run_gyp('test4.gyp', *args, **kw)
def EnsureContains(matched=False, compile_targets=set(), test_targets=set()):
"""Verifies output contains |compile_targets|."""
result = _ReadOutputFileContents()
if result.get('error', None):
print 'unexpected error', result.get('error')
test.fail_test()
if result.get('invalid_targets', None):
print 'unexpected invalid_targets', result.get('invalid_targets')
test.fail_test()
actual_compile_targets = set(result['compile_targets'])
if actual_compile_targets != compile_targets:
print 'actual compile_targets:', actual_compile_targets, \
'\nexpected compile_targets:', compile_targets
test.fail_test()
actual_test_targets = set(result['test_targets'])
if actual_test_targets != test_targets:
print 'actual test_targets:', actual_test_targets, \
'\nexpected test_targets:', test_targets
test.fail_test()
if matched and result['status'] != found:
print 'expected', found, 'got', result['status']
test.fail_test()
elif not matched and result['status'] != not_found:
print 'expected', not_found, 'got', result['status']
test.fail_test()
def EnsureMatchedAll(compile_targets, test_targets=set()):
result = _ReadOutputFileContents()
if result.get('error', None):
print 'unexpected error', result.get('error')
test.fail_test()
if result.get('invalid_targets', None):
print 'unexpected invalid_targets', result.get('invalid_targets')
test.fail_test()
if result['status'] != found_all:
print 'expected', found_all, 'got', result['status']
test.fail_test()
actual_compile_targets = set(result['compile_targets'])
if actual_compile_targets != compile_targets:
print ('actual compile_targets:', actual_compile_targets,
'\nexpected compile_targets:', compile_targets)
test.fail_test()
actual_test_targets = set(result['test_targets'])
if actual_test_targets != test_targets:
print ('actual test_targets:', actual_test_targets,
'\nexpected test_targets:', test_targets)
test.fail_test()
def EnsureError(expected_error_string):
"""Verifies output contains the error string."""
result = _ReadOutputFileContents()
if result.get('error', '').find(expected_error_string) == -1:
print 'actual error:', result.get('error', ''), '\nexpected error:', \
expected_error_string
test.fail_test()
def EnsureStdoutContains(expected_error_string):
if test.stdout().find(expected_error_string) == -1:
print 'actual stdout:', test.stdout(), '\nexpected stdout:', \
expected_error_string
test.fail_test()
def EnsureInvalidTargets(expected_invalid_targets):
"""Verifies output contains invalid_targets."""
result = _ReadOutputFileContents()
actual_invalid_targets = set(result['invalid_targets'])
if actual_invalid_targets != expected_invalid_targets:
print 'actual invalid_targets:', actual_invalid_targets, \
'\nexpected :', expected_invalid_targets
test.fail_test()
# Two targets, A and B (both static_libraries) and A depends upon B. If a file
# in B changes, then both A and B are output. It is not strictly necessary that
# A is compiled in this case, only B.
_CreateConfigFile(['b.c'], ['all'])
test.run_gyp('static_library_test.gyp', *CommonArgs())
EnsureContains(matched=True, compile_targets={'a', 'b'})
# Verifies config_path must be specified.
test.run_gyp('test.gyp')
EnsureStdoutContains('Must specify files to analyze via config_path')
# Verifies config_path must point to a valid file.
test.run_gyp('test.gyp', '-Gconfig_path=bogus_file',
'-Ganalyzer_output_path=analyzer_output')
EnsureError('Unable to open file bogus_file')
# Verify 'invalid_targets' is present when bad target is specified.
_CreateConfigFile(['exe2.c'], ['bad_target'])
run_analyzer()
EnsureInvalidTargets({'bad_target'})
# Verifies config_path must point to a valid json file.
_CreateBogusConfigFile()
run_analyzer()
EnsureError('Unable to parse config file test_file')
# Trivial test of a source.
_CreateConfigFile(['foo.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe'})
# Conditional source that is excluded.
_CreateConfigFile(['conditional_source.c'], ['all'])
run_analyzer()
EnsureContains(matched=False)
# Conditional source that is included by way of argument.
_CreateConfigFile(['conditional_source.c'], ['all'])
run_analyzer('-Dtest_variable=1')
EnsureContains(matched=True, compile_targets={'exe'})
# Two unknown files.
_CreateConfigFile(['unknown1.c', 'unoknow2.cc'], ['all'])
run_analyzer()
EnsureContains()
# Two unknown files.
_CreateConfigFile(['unknown1.c', 'subdir/subdir_sourcex.c'], ['all'])
run_analyzer()
EnsureContains()
# Included dependency
_CreateConfigFile(['unknown1.c', 'subdir/subdir_source.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe', 'exe3'})
# Included inputs to actions.
_CreateConfigFile(['action_input.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe'})
# Don't consider outputs.
_CreateConfigFile(['action_output.c'], ['all'])
run_analyzer()
EnsureContains(matched=False)
# Rule inputs.
_CreateConfigFile(['rule_input.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe'})
# Ignore path specified with PRODUCT_DIR.
_CreateConfigFile(['product_dir_input.c'], ['all'])
run_analyzer()
EnsureContains(matched=False)
# Path specified via a variable.
_CreateConfigFile(['subdir/subdir_source2.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe'})
# Verifies paths with // are fixed up correctly.
_CreateConfigFile(['parent_source.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe', 'exe3'})
# Verifies relative paths are resolved correctly.
_CreateConfigFile(['subdir/subdir_source.h'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe'})
# Verifies relative paths in inputs are resolved correctly.
_CreateConfigFile(['rel_path1.h'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe'})
# Various permutations when passing in targets.
_CreateConfigFile(['exe2.c', 'subdir/subdir2b_source.c'],
['all'], ['exe', 'exe3'])
run_analyzer()
EnsureContains(matched=True, test_targets={'exe3'},
compile_targets={'exe2', 'exe3'})
_CreateConfigFile(['exe2.c', 'subdir/subdir2b_source.c'], ['all'], ['exe'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe2', 'exe3'})
# Verifies duplicates are ignored.
_CreateConfigFile(['exe2.c', 'subdir/subdir2b_source.c'], ['all'],
['exe', 'exe'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe2', 'exe3'})
_CreateConfigFile(['exe2.c'], ['all'], ['exe'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe2'})
_CreateConfigFile(['exe2.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe2'})
_CreateConfigFile(['subdir/subdir2b_source.c', 'exe2.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe2', 'exe3'})
_CreateConfigFile(['subdir/subdir2b_source.c'], ['all'], ['exe3'])
run_analyzer()
EnsureContains(matched=True, test_targets={'exe3'}, compile_targets={'exe3'})
_CreateConfigFile(['exe2.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe2'})
_CreateConfigFile(['foo.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe'})
# Assertions when modifying build (gyp/gypi) files, especially when said files
# are included.
_CreateConfigFile(['subdir2/d.cc'], ['all'], ['exe', 'exe2', 'foo', 'exe3'])
run_analyzer2()
EnsureContains(matched=True, test_targets={'exe', 'foo'},
compile_targets={'exe'})
_CreateConfigFile(['subdir2/subdir.includes.gypi'], ['all'],
['exe', 'exe2', 'foo', 'exe3'])
run_analyzer2()
EnsureContains(matched=True, test_targets={'exe', 'foo'},
compile_targets={'exe'})
_CreateConfigFile(['subdir2/subdir.gyp'], ['all'],
['exe', 'exe2', 'foo', 'exe3'])
run_analyzer2()
EnsureContains(matched=True, test_targets={'exe', 'foo'},
compile_targets={'exe'})
_CreateConfigFile(['test2.includes.gypi'], ['all'],
['exe', 'exe2', 'foo', 'exe3'])
run_analyzer2()
EnsureContains(matched=True, test_targets={'exe', 'exe2', 'exe3'},
compile_targets={'exe', 'exe2', 'exe3'})
# Verify modifying a file included makes all targets dirty.
_CreateConfigFile(['common.gypi'], ['all'], ['exe', 'exe2', 'foo', 'exe3'])
run_analyzer2('-Icommon.gypi')
EnsureMatchedAll({'all', 'exe', 'exe2', 'foo', 'exe3'},
{'exe', 'exe2', 'foo', 'exe3'})
# Assertions from test3.gyp.
_CreateConfigFile(['d.c', 'f.c'], ['all'], ['a'])
run_analyzer3()
EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a', 'b'})
_CreateConfigFile(['f.c'], ['all'], ['a'])
run_analyzer3()
EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a', 'b'})
_CreateConfigFile(['f.c'], ['all'])
run_analyzer3()
EnsureContains(matched=True, compile_targets={'a', 'b'})
_CreateConfigFile(['c.c', 'e.c'], ['all'])
run_analyzer3()
EnsureContains(matched=True, compile_targets={'a', 'b', 'c', 'e'})
_CreateConfigFile(['d.c'], ['all'], ['a'])
run_analyzer3()
EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a', 'b'})
_CreateConfigFile(['a.c'], ['all'], ['a', 'b'])
run_analyzer3()
EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a'})
_CreateConfigFile(['a.c'], ['all'], ['a', 'b'])
run_analyzer3()
EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a'})
_CreateConfigFile(['d.c'], ['all'], ['a', 'b'])
run_analyzer3()
EnsureContains(matched=True, test_targets={'a', 'b'},
compile_targets={'a', 'b'})
_CreateConfigFile(['f.c'], ['all'], ['a'])
run_analyzer3()
EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a', 'b'})
_CreateConfigFile(['a.c'], ['all'], ['a'])
run_analyzer3()
EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a'})
_CreateConfigFile(['a.c'], ['all'])
run_analyzer3()
EnsureContains(matched=True, compile_targets={'a'})
_CreateConfigFile(['d.c'], ['all'])
run_analyzer3()
EnsureContains(matched=True, compile_targets={'a', 'b'})
# Assertions around test4.gyp.
_CreateConfigFile(['f.c'], ['all'])
run_analyzer4()
EnsureContains(matched=True, compile_targets={'e', 'f'})
_CreateConfigFile(['d.c'], ['all'])
run_analyzer4()
EnsureContains(matched=True, compile_targets={'a', 'b', 'c', 'd'})
_CreateConfigFile(['i.c'], ['all'])
run_analyzer4()
EnsureContains(matched=True, compile_targets={'h', 'i'})
# Assertions where 'all' is not supplied in compile_targets.
_CreateConfigFile(['exe2.c'], [], ['exe2'])
run_analyzer()
EnsureContains(matched=True, test_targets={'exe2'}, compile_targets={'exe2'})
_CreateConfigFile(['exe20.c'], [], ['exe2'])
run_analyzer()
EnsureContains(matched=False)
_CreateConfigFile(['exe2.c', 'exe3.c'], [], ['exe2', 'exe3'])
run_analyzer()
EnsureContains(matched=True, test_targets={'exe2', 'exe3'},
compile_targets={'exe2', 'exe3'})
_CreateConfigFile(['exe2.c', 'exe3.c'], ['exe3'], ['exe2'])
run_analyzer()
EnsureContains(matched=True, test_targets={'exe2'},
compile_targets={'exe2', 'exe3'})
_CreateConfigFile(['exe3.c'], ['exe2'], ['exe2'])
run_analyzer()
EnsureContains(matched=False)
# Assertions with 'all' listed as a test_target.
_CreateConfigFile(['exe3.c'], [], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe3'}, test_targets={'all'})
_CreateConfigFile(['exe2.c'], [], ['all', 'exe2'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe2'},
test_targets={'all', 'exe2'})
test.pass_test()
| pnigos/gyp | test/analyzer/gyptest-analyzer.py | Python | bsd-3-clause | 13,761 |
# -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('entries', '0005_resultsmode_json'),
]
operations = [
migrations.AlterField(
model_name='resultsmode',
name='json',
field=models.TextField(default='', blank=True),
),
]
| mjtamlyn/archery-scoring | entries/migrations/0006_auto_20150612_2307.py | Python | bsd-3-clause | 372 |
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.config import ConfigValidationError
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from txircd.utils import durationToSeconds, ipAddressToShow, ircLower, now
from zope.interface import implementer
from datetime import datetime, timedelta
from typing import Any, Callable, Dict, List, Optional, Tuple
irc.RPL_WHOWASIP = "379"
@implementer(IPlugin, IModuleData, ICommand)
class WhowasCommand(ModuleData, Command):
name = "WhowasCommand"
core = True
def actions(self) -> List[Tuple[str, int, Callable]]:
return [ ("quit", 10, self.addUserToWhowas),
("remotequit", 10, self.addUserToWhowas),
("localquit", 10, self.addUserToWhowas) ]
def userCommands(self) -> List[Tuple[str, int, Command]]:
return [ ("WHOWAS", 1, self) ]
def load(self) -> None:
if "whowas" not in self.ircd.storage:
self.ircd.storage["whowas"] = {}
def verifyConfig(self, config: Dict[str, Any]) -> None:
if "whowas_duration" in config and not isinstance(config["whowas_duration"], str) and not isinstance(config["whowas_duration"], int):
raise ConfigValidationError("whowas_duration", "value must be an integer or a duration string")
if "whowas_max_entries" in config and (not isinstance(config["whowas_max_entries"], int) or config["whowas_max_entries"] < 0):
raise ConfigValidationError("whowas_max_entries", "invalid number")
def removeOldEntries(self, whowasEntries: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
expireDuration = durationToSeconds(self.ircd.config.get("whowas_duration", "1d"))
maxCount = self.ircd.config.get("whowas_max_entries", 10)
while whowasEntries and len(whowasEntries) > maxCount:
whowasEntries.pop(0)
expireDifference = timedelta(seconds=expireDuration)
expireTime = now() - expireDifference
while whowasEntries and whowasEntries[0]["when"] < expireTime:
whowasEntries.pop(0)
return whowasEntries
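	# Illustrative note (not part of the original source): with the defaults
	# used above ("whowas_duration" of "1d" and "whowas_max_entries" of 10),
	# entries older than one day or beyond the ten most recent are pruned.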
def addUserToWhowas(self, user: "IRCUser", reason: str, fromServer: "IRCServer" = None) -> None:
if not user.isRegistered():
# user never registered a nick, so no whowas entry to add
return
lowerNick = ircLower(user.nick)
allWhowas = self.ircd.storage["whowas"]
if lowerNick in allWhowas:
whowasEntries = allWhowas[lowerNick]
else:
whowasEntries = []
serverName = self.ircd.name
if user.uuid[:3] != self.ircd.serverID:
serverName = self.ircd.servers[user.uuid[:3]].name
whowasEntries.append({
"nick": user.nick,
"ident": user.ident,
"host": user.host(),
"realhost": user.realHost,
"ip": ipAddressToShow(user.ip),
"gecos": user.gecos,
"server": serverName,
"when": now()
})
whowasEntries = self.removeOldEntries(whowasEntries)
if whowasEntries:
allWhowas[lowerNick] = whowasEntries
elif lowerNick in allWhowas:
del allWhowas[lowerNick]
def parseParams(self, user: "IRCUser", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
if not params:
user.sendSingleError("WhowasCmd", irc.ERR_NEEDMOREPARAMS, "WHOWAS", "Not enough parameters")
return None
lowerParam = ircLower(params[0])
if lowerParam not in self.ircd.storage["whowas"]:
user.sendSingleError("WhowasNick", irc.ERR_WASNOSUCHNICK, params[0], "There was no such nickname")
return None
return {
"nick": lowerParam,
"param": params[0]
}
def execute(self, user: "IRCUser", data: Dict[Any, Any]) -> bool:
nick = data["nick"]
allWhowas = self.ircd.storage["whowas"]
whowasEntries = allWhowas[nick]
whowasEntries = self.removeOldEntries(whowasEntries)
if not whowasEntries:
del allWhowas[nick]
self.ircd.storage["whowas"] = allWhowas
user.sendMessage(irc.ERR_WASNOSUCHNICK, data["param"], "There was no such nickname")
return True
allWhowas[nick] = whowasEntries # Save back to the list excluding the removed entries
self.ircd.storage["whowas"] = allWhowas
for entry in whowasEntries:
entryNick = entry["nick"]
user.sendMessage(irc.RPL_WHOWASUSER, entryNick, entry["ident"], entry["host"], "*", entry["gecos"])
if self.ircd.runActionUntilValue("userhasoperpermission", user, "whowas-host", users=[user]):
user.sendMessage(irc.RPL_WHOWASIP, entryNick, "was connecting from {}@{} {}".format(entry["ident"], entry["realhost"], entry["ip"]))
user.sendMessage(irc.RPL_WHOISSERVER, entryNick, entry["server"], str(entry["when"]))
user.sendMessage(irc.RPL_ENDOFWHOWAS, nick, "End of WHOWAS")
return True
whowasCmd = WhowasCommand() | Heufneutje/txircd | txircd/modules/rfc/cmd_whowas.py | Python | bsd-3-clause | 4,564 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin, use {% url 'admin:index' %}
url(r'^' + settings.ADMIN_URL, include(admin.site.urls)),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# User management
url(r'^users/', include("therapyinvoicing.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
url(r'^customers/', include("therapyinvoicing.customers.urls", namespace="customers")),
url(r'^customerinvoicing/', include("therapyinvoicing.customerinvoicing.urls", namespace="customerinvoicing")),
url(r'^kelainvoicing/', include("therapyinvoicing.kelainvoicing.urls", namespace="kelainvoicing")),
url(r'^api/', include("therapyinvoicing.api.urls", namespace="api")),
url(r'^reporting/', include("therapyinvoicing.reporting.urls", namespace="reporting")),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request),
url(r'^403/$', default_views.permission_denied),
url(r'^404/$', default_views.page_not_found),
url(r'^500/$', default_views.server_error),
]
| ylitormatech/terapialaskutus | config/urls.py | Python | bsd-3-clause | 1,837 |
import json
from json_url_rewriter import config
from json_url_rewriter.rewrite import URLRewriter
class HeaderToPathPrefixRewriter(object):
"""
A rewriter to take the value of a header and prefix any path.
"""
def __init__(self, keys, base, header_name):
self.keys = keys
self.base = base
self.header_name = header_name
@property
def regex(self):
return '(%s)(.*)' % self.base
def header(self):
return 'HTTP_' + self.header_name.upper().replace('-', '_')
def __call__(self, doc, environ):
key = self.header()
        if key not in environ:
return doc
prefix = environ[key]
def replacement(match):
base, path = match.groups()
return '%s/%s%s' % (base, prefix, path)
rewriter = URLRewriter(self.keys, self.regex, replacement)
return rewriter(doc)
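# Illustrative example (values are hypothetical, and URLRewriter is assumed to
# apply the regex substitution to the values stored under the given keys):
# with keys=['url'], base='http://api.example.com' and header_name='X-Prefix',
# a request carrying HTTP_X_PREFIX='v2' rewrites
#   {'url': 'http://api.example.com/things'}
# into
#   {'url': 'http://api.example.com/v2/things'}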
class RewriteMiddleware(object):
def __init__(self, app, rewriter):
self.app = app
self.rewriter = rewriter
@staticmethod
def content_type(headers):
return dict([(k.lower(), v) for k, v in headers]).get('content-type')
def is_json(self, headers):
return 'json' in self.content_type(headers)
@staticmethod
def ok(status):
return status.startswith('20')
def rewrite(self, resp, environ):
doc = self.rewriter(self.json(resp), environ)
return json.dumps(doc)
def json(self, resp):
return json.loads(''.join(resp))
def __call__(self, environ, start_response):
        # Reset the per-request rewrite flag (stored on the middleware instance,
        # so this is not safe under concurrent requests sharing one instance)
self.do_rewrite = False
# Our request local start response wrapper to grab the
# response headers
def sr(status, response_headers, exc_info=None):
if self.ok(status) and self.is_json(response_headers):
self.do_rewrite = True
# Call the original start_response
return start_response(status, response_headers, exc_info)
# call our app
resp = self.app(environ, sr)
        # The flag will have been set to True if the response should be rewritten
if self.do_rewrite:
return [self.rewrite(resp, environ)]
return resp
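# Minimal wiring sketch (names below are illustrative, not part of this module):
#   rewriter = HeaderToPathPrefixRewriter(['url'], 'http://api.example.com', 'X-Prefix')
#   application = RewriteMiddleware(wsgi_app, rewriter)
# Any 2xx JSON response from wsgi_app is then re-serialized with matching URLs
# rewritten according to the prefix header.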
def json_url_rewriter_filter_factory(global_conf, *args, **kw):
print(global_conf, args, kw)
raise Exception('Blastoff')
| ionrock/json_url_rewriter | json_url_rewriter/middleware.py | Python | bsd-3-clause | 2,398 |
def extractSweetjamtranslationsCom(item):
'''
Parser for 'sweetjamtranslations.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractSweetjamtranslationsCom.py | Python | bsd-3-clause | 561 |
def extractMiratlsWordpressCom(item):
'''
Parser for 'miratls.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractMiratlsWordpressCom.py | Python | bsd-3-clause | 554 |
from textwrap import dedent
from sympy import (
symbols, Integral, Tuple, Dummy, Basic, default_sort_key, Matrix,
factorial, true)
from sympy.combinatorics import RGS_enum, RGS_unrank, Permutation
from sympy.utilities.iterables import (
_partition, _set_partitions, binary_partitions, bracelets, capture,
cartes, common_prefix, common_suffix, dict_merge, flatten,
generate_bell, generate_derangements, generate_involutions,
generate_oriented_forest, group, has_dups, kbins, minlex, multiset,
multiset_combinations, multiset_partitions, multiset_permutations,
necklaces, numbered_symbols, ordered, partitions, permutations,
postfixes, postorder_traversal, prefixes, reshape, rotate_left,
rotate_right, runs, sift, subsets, take, topological_sort, unflatten,
uniq, variations)
from sympy.core.singleton import S
from sympy.functions.elementary.piecewise import Piecewise, ExprCondPair
from sympy.utilities.pytest import raises
w, x, y, z = symbols('w,x,y,z')
def test_postorder_traversal():
expr = z + w*(x + y)
expected = [z, w, x, y, x + y, w*(x + y), w*(x + y) + z]
assert list(postorder_traversal(expr, keys=default_sort_key)) == expected
assert list(postorder_traversal(expr, keys=True)) == expected
expr = Piecewise((x, x < 1), (x**2, True))
expected = [
x, 1, x, x < 1, ExprCondPair(x, x < 1),
2, x, x**2, true,
ExprCondPair(x**2, True), Piecewise((x, x < 1), (x**2, True))
]
assert list(postorder_traversal(expr, keys=default_sort_key)) == expected
assert list(postorder_traversal(
[expr], keys=default_sort_key)) == expected + [[expr]]
assert list(postorder_traversal(Integral(x**2, (x, 0, 1)),
keys=default_sort_key)) == [
2, x, x**2, 0, 1, x, Tuple(x, 0, 1),
Integral(x**2, Tuple(x, 0, 1))
]
assert list(postorder_traversal(('abc', ('d', 'ef')))) == [
'abc', 'd', 'ef', ('d', 'ef'), ('abc', ('d', 'ef'))]
def test_flatten():
assert flatten((1, (1,))) == [1, 1]
assert flatten((x, (x,))) == [x, x]
ls = [[(-2, -1), (1, 2)], [(0, 0)]]
assert flatten(ls, levels=0) == ls
assert flatten(ls, levels=1) == [(-2, -1), (1, 2), (0, 0)]
assert flatten(ls, levels=2) == [-2, -1, 1, 2, 0, 0]
assert flatten(ls, levels=3) == [-2, -1, 1, 2, 0, 0]
raises(ValueError, lambda: flatten(ls, levels=-1))
class MyOp(Basic):
pass
assert flatten([MyOp(x, y), z]) == [MyOp(x, y), z]
assert flatten([MyOp(x, y), z], cls=MyOp) == [x, y, z]
assert flatten(set([1, 11, 2])) == list(set([1, 11, 2]))
def test_group():
assert group([]) == []
assert group([], multiple=False) == []
assert group([1]) == [[1]]
assert group([1], multiple=False) == [(1, 1)]
assert group([1, 1]) == [[1, 1]]
assert group([1, 1], multiple=False) == [(1, 2)]
assert group([1, 1, 1]) == [[1, 1, 1]]
assert group([1, 1, 1], multiple=False) == [(1, 3)]
assert group([1, 2, 1]) == [[1], [2], [1]]
assert group([1, 2, 1], multiple=False) == [(1, 1), (2, 1), (1, 1)]
assert group([1, 1, 2, 2, 2, 1, 3, 3]) == [[1, 1], [2, 2, 2], [1], [3, 3]]
assert group([1, 1, 2, 2, 2, 1, 3, 3], multiple=False) == [(1, 2),
(2, 3), (1, 1), (3, 2)]
def test_subsets():
# combinations
assert list(subsets([1, 2, 3], 0)) == [()]
assert list(subsets([1, 2, 3], 1)) == [(1,), (2,), (3,)]
assert list(subsets([1, 2, 3], 2)) == [(1, 2), (1, 3), (2, 3)]
assert list(subsets([1, 2, 3], 3)) == [(1, 2, 3)]
l = list(range(4))
assert list(subsets(l, 0, repetition=True)) == [()]
assert list(subsets(l, 1, repetition=True)) == [(0,), (1,), (2,), (3,)]
assert list(subsets(l, 2, repetition=True)) == [(0, 0), (0, 1), (0, 2),
(0, 3), (1, 1), (1, 2),
(1, 3), (2, 2), (2, 3),
(3, 3)]
assert list(subsets(l, 3, repetition=True)) == [(0, 0, 0), (0, 0, 1),
(0, 0, 2), (0, 0, 3),
(0, 1, 1), (0, 1, 2),
(0, 1, 3), (0, 2, 2),
(0, 2, 3), (0, 3, 3),
(1, 1, 1), (1, 1, 2),
(1, 1, 3), (1, 2, 2),
(1, 2, 3), (1, 3, 3),
(2, 2, 2), (2, 2, 3),
(2, 3, 3), (3, 3, 3)]
assert len(list(subsets(l, 4, repetition=True))) == 35
assert list(subsets(l[:2], 3, repetition=False)) == []
assert list(subsets(l[:2], 3, repetition=True)) == [(0, 0, 0),
(0, 0, 1),
(0, 1, 1),
(1, 1, 1)]
assert list(subsets([1, 2], repetition=True)) == \
[(), (1,), (2,), (1, 1), (1, 2), (2, 2)]
assert list(subsets([1, 2], repetition=False)) == \
[(), (1,), (2,), (1, 2)]
assert list(subsets([1, 2, 3], 2)) == \
[(1, 2), (1, 3), (2, 3)]
assert list(subsets([1, 2, 3], 2, repetition=True)) == \
[(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
def test_variations():
# permutations
l = list(range(4))
assert list(variations(l, 0, repetition=False)) == [()]
assert list(variations(l, 1, repetition=False)) == [(0,), (1,), (2,), (3,)]
assert list(variations(l, 2, repetition=False)) == [(0, 1), (0, 2), (0, 3), (1, 0), (1, 2), (1, 3), (2, 0), (2, 1), (2, 3), (3, 0), (3, 1), (3, 2)]
assert list(variations(l, 3, repetition=False)) == [(0, 1, 2), (0, 1, 3), (0, 2, 1), (0, 2, 3), (0, 3, 1), (0, 3, 2), (1, 0, 2), (1, 0, 3), (1, 2, 0), (1, 2, 3), (1, 3, 0), (1, 3, 2), (2, 0, 1), (2, 0, 3), (2, 1, 0), (2, 1, 3), (2, 3, 0), (2, 3, 1), (3, 0, 1), (3, 0, 2), (3, 1, 0), (3, 1, 2), (3, 2, 0), (3, 2, 1)]
assert list(variations(l, 0, repetition=True)) == [()]
assert list(variations(l, 1, repetition=True)) == [(0,), (1,), (2,), (3,)]
assert list(variations(l, 2, repetition=True)) == [(0, 0), (0, 1), (0, 2),
(0, 3), (1, 0), (1, 1),
(1, 2), (1, 3), (2, 0),
(2, 1), (2, 2), (2, 3),
(3, 0), (3, 1), (3, 2),
(3, 3)]
assert len(list(variations(l, 3, repetition=True))) == 64
assert len(list(variations(l, 4, repetition=True))) == 256
assert list(variations(l[:2], 3, repetition=False)) == []
assert list(variations(l[:2], 3, repetition=True)) == [
(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)
]
def test_cartes():
assert list(cartes([1, 2], [3, 4, 5])) == \
[(1, 3), (1, 4), (1, 5), (2, 3), (2, 4), (2, 5)]
assert list(cartes()) == [()]
assert list(cartes('a')) == [('a',)]
assert list(cartes('a', repeat=2)) == [('a', 'a')]
assert list(cartes(list(range(2)))) == [(0,), (1,)]
def test_numbered_symbols():
s = numbered_symbols(cls=Dummy)
assert isinstance(next(s), Dummy)
def test_sift():
assert sift(list(range(5)), lambda _: _ % 2) == {1: [1, 3], 0: [0, 2, 4]}
assert sift([x, y], lambda _: _.has(x)) == {False: [y], True: [x]}
assert sift([S.One], lambda _: _.has(x)) == {False: [1]}
def test_take():
X = numbered_symbols()
assert take(X, 5) == list(symbols('x0:5'))
assert take(X, 5) == list(symbols('x5:10'))
assert take([1, 2, 3, 4, 5], 5) == [1, 2, 3, 4, 5]
def test_dict_merge():
assert dict_merge({}, {1: x, y: z}) == {1: x, y: z}
assert dict_merge({1: x, y: z}, {}) == {1: x, y: z}
assert dict_merge({2: z}, {1: x, y: z}) == {1: x, 2: z, y: z}
assert dict_merge({1: x, y: z}, {2: z}) == {1: x, 2: z, y: z}
assert dict_merge({1: y, 2: z}, {1: x, y: z}) == {1: x, 2: z, y: z}
assert dict_merge({1: x, y: z}, {1: y, 2: z}) == {1: y, 2: z, y: z}
def test_prefixes():
assert list(prefixes([])) == []
assert list(prefixes([1])) == [[1]]
assert list(prefixes([1, 2])) == [[1], [1, 2]]
assert list(prefixes([1, 2, 3, 4, 5])) == \
[[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [1, 2, 3, 4, 5]]
def test_postfixes():
assert list(postfixes([])) == []
assert list(postfixes([1])) == [[1]]
assert list(postfixes([1, 2])) == [[2], [1, 2]]
assert list(postfixes([1, 2, 3, 4, 5])) == \
[[5], [4, 5], [3, 4, 5], [2, 3, 4, 5], [1, 2, 3, 4, 5]]
def test_topological_sort():
V = [2, 3, 5, 7, 8, 9, 10, 11]
E = [(7, 11), (7, 8), (5, 11),
(3, 8), (3, 10), (11, 2),
(11, 9), (11, 10), (8, 9)]
assert topological_sort((V, E)) == [3, 5, 7, 8, 11, 2, 9, 10]
assert topological_sort((V, E), key=lambda v: -v) == \
[7, 5, 11, 3, 10, 8, 9, 2]
raises(ValueError, lambda: topological_sort((V, E + [(10, 7)])))
def test_rotate():
A = [0, 1, 2, 3, 4]
assert rotate_left(A, 2) == [2, 3, 4, 0, 1]
assert rotate_right(A, 1) == [4, 0, 1, 2, 3]
A = []
B = rotate_right(A, 1)
assert B == []
B.append(1)
assert A == []
B = rotate_left(A, 1)
assert B == []
B.append(1)
assert A == []
def test_multiset_partitions():
A = [0, 1, 2, 3, 4]
assert list(multiset_partitions(A, 5)) == [[[0], [1], [2], [3], [4]]]
assert len(list(multiset_partitions(A, 4))) == 10
assert len(list(multiset_partitions(A, 3))) == 25
assert list(multiset_partitions([1, 1, 1, 2, 2], 2)) == [
[[1, 1, 1, 2], [2]], [[1, 1, 1], [2, 2]], [[1, 1, 2, 2], [1]],
[[1, 1, 2], [1, 2]], [[1, 1], [1, 2, 2]]]
assert list(multiset_partitions([1, 1, 2, 2], 2)) == [
[[1, 1, 2], [2]], [[1, 1], [2, 2]], [[1, 2, 2], [1]],
[[1, 2], [1, 2]]]
assert list(multiset_partitions([1, 2, 3, 4], 2)) == [
[[1, 2, 3], [4]], [[1, 2, 4], [3]], [[1, 2], [3, 4]],
[[1, 3, 4], [2]], [[1, 3], [2, 4]], [[1, 4], [2, 3]],
[[1], [2, 3, 4]]]
assert list(multiset_partitions([1, 2, 2], 2)) == [
[[1, 2], [2]], [[1], [2, 2]]]
assert list(multiset_partitions(3)) == [
[[0, 1, 2]], [[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]],
[[0], [1], [2]]]
assert list(multiset_partitions(3, 2)) == [
[[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]]]
assert list(multiset_partitions([1] * 3, 2)) == [[[1], [1, 1]]]
assert list(multiset_partitions([1] * 3)) == [
[[1, 1, 1]], [[1], [1, 1]], [[1], [1], [1]]]
a = [3, 2, 1]
assert list(multiset_partitions(a)) == \
list(multiset_partitions(sorted(a)))
assert list(multiset_partitions(a, 5)) == []
assert list(multiset_partitions(a, 1)) == [[[1, 2, 3]]]
assert list(multiset_partitions(a + [4], 5)) == []
assert list(multiset_partitions(a + [4], 1)) == [[[1, 2, 3, 4]]]
assert list(multiset_partitions(2, 5)) == []
assert list(multiset_partitions(2, 1)) == [[[0, 1]]]
assert list(multiset_partitions('a')) == [[['a']]]
assert list(multiset_partitions('a', 2)) == []
assert list(multiset_partitions('ab')) == [[['a', 'b']], [['a'], ['b']]]
assert list(multiset_partitions('ab', 1)) == [[['a', 'b']]]
assert list(multiset_partitions('aaa', 1)) == [['aaa']]
assert list(multiset_partitions([1, 1], 1)) == [[[1, 1]]]
def test_multiset_combinations():
ans = ['iii', 'iim', 'iip', 'iis', 'imp', 'ims', 'ipp', 'ips',
'iss', 'mpp', 'mps', 'mss', 'pps', 'pss', 'sss']
assert [''.join(i) for i in
list(multiset_combinations('mississippi', 3))] == ans
M = multiset('mississippi')
assert [''.join(i) for i in
list(multiset_combinations(M, 3))] == ans
assert [''.join(i) for i in multiset_combinations(M, 30)] == []
assert list(multiset_combinations([[1], [2, 3]], 2)) == [[[1], [2, 3]]]
assert len(list(multiset_combinations('a', 3))) == 0
assert len(list(multiset_combinations('a', 0))) == 1
assert list(multiset_combinations('abc', 1)) == [['a'], ['b'], ['c']]
def test_multiset_permutations():
ans = ['abby', 'abyb', 'aybb', 'baby', 'bayb', 'bbay', 'bbya', 'byab',
'byba', 'yabb', 'ybab', 'ybba']
assert [''.join(i) for i in multiset_permutations('baby')] == ans
assert [''.join(i) for i in multiset_permutations(multiset('baby'))] == ans
assert list(multiset_permutations([0, 0, 0], 2)) == [[0, 0]]
assert list(multiset_permutations([0, 2, 1], 2)) == [
[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]]
assert len(list(multiset_permutations('a', 0))) == 1
assert len(list(multiset_permutations('a', 3))) == 0
def test():
for i in range(1, 7):
print(i)
for p in multiset_permutations([0, 0, 1, 0, 1], i):
print(p)
assert capture(lambda: test()) == dedent('''\
1
[0]
[1]
2
[0, 0]
[0, 1]
[1, 0]
[1, 1]
3
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
4
[0, 0, 0, 1]
[0, 0, 1, 0]
[0, 0, 1, 1]
[0, 1, 0, 0]
[0, 1, 0, 1]
[0, 1, 1, 0]
[1, 0, 0, 0]
[1, 0, 0, 1]
[1, 0, 1, 0]
[1, 1, 0, 0]
5
[0, 0, 0, 1, 1]
[0, 0, 1, 0, 1]
[0, 0, 1, 1, 0]
[0, 1, 0, 0, 1]
[0, 1, 0, 1, 0]
[0, 1, 1, 0, 0]
[1, 0, 0, 0, 1]
[1, 0, 0, 1, 0]
[1, 0, 1, 0, 0]
[1, 1, 0, 0, 0]
6\n''')
def test_partitions():
assert [p.copy() for p in partitions(6, k=2)] == [
{2: 3}, {1: 2, 2: 2}, {1: 4, 2: 1}, {1: 6}]
assert [p.copy() for p in partitions(6, k=3)] == [
{3: 2}, {1: 1, 2: 1, 3: 1}, {1: 3, 3: 1}, {2: 3}, {1: 2, 2: 2},
{1: 4, 2: 1}, {1: 6}]
assert [p.copy() for p in partitions(6, k=2, m=2)] == []
assert [p.copy() for p in partitions(8, k=4, m=3)] == [
{4: 2}, {1: 1, 3: 1, 4: 1}, {2: 2, 4: 1}, {2: 1, 3: 2}] == [
i.copy() for i in partitions(8, k=4, m=3) if all(k <= 4 for k in i)
and sum(i.values()) <=3]
assert [p.copy() for p in partitions(S(3), m=2)] == [
{3: 1}, {1: 1, 2: 1}]
assert [i.copy() for i in partitions(4, k=3)] == [
{1: 1, 3: 1}, {2: 2}, {1: 2, 2: 1}, {1: 4}] == [
i.copy() for i in partitions(4) if all(k <= 3 for k in i)]
raises(ValueError, lambda: list(partitions(3, 0)))
# Consistency check on output of _partitions and RGS_unrank.
# This provides a sanity test on both routines. Also verifies that
# the total number of partitions is the same in each case.
# (from pkrathmann2)
for n in range(2, 6):
i = 0
for m, q in _set_partitions(n):
assert q == RGS_unrank(i, n)
i = i+1
assert i == RGS_enum(n)
def test_binary_partitions():
assert [i[:] for i in binary_partitions(10)] == [[8, 2], [8, 1, 1],
[4, 4, 2], [4, 4, 1, 1], [4, 2, 2, 2], [4, 2, 2, 1, 1],
[4, 2, 1, 1, 1, 1], [4, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2],
[2, 2, 2, 2, 1, 1], [2, 2, 2, 1, 1, 1, 1], [2, 2, 1, 1, 1, 1, 1, 1],
[2, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
assert len([j[:] for j in binary_partitions(16)]) == 36
def test_bell_perm():
assert [len(list(generate_bell(i))) for i in range(1, 7)] == [
factorial(i) for i in range(1, 7)]
assert list(generate_bell(3)) == [
(0, 1, 2), (1, 0, 2), (1, 2, 0), (2, 1, 0), (2, 0, 1), (0, 2, 1)]
def test_involutions():
lengths = [1, 2, 4, 10, 26, 76]
for n, N in enumerate(lengths):
i = list(generate_involutions(n + 1))
assert len(i) == N
assert len(set([Permutation(j)**2 for j in i])) == 1
def test_derangements():
assert len(list(generate_derangements(list(range(6))))) == 265
assert ''.join(''.join(i) for i in generate_derangements('abcde')) == (
'badecbaecdbcaedbcdeabceadbdaecbdeacbdecabeacdbedacbedcacabedcadebcaebd'
'cdaebcdbeacdeabcdebaceabdcebadcedabcedbadabecdaebcdaecbdcaebdcbeadceab'
'dcebadeabcdeacbdebacdebcaeabcdeadbceadcbecabdecbadecdabecdbaedabcedacb'
'edbacedbca')
assert list(generate_derangements([0, 1, 2, 3])) == [
[1, 0, 3, 2], [1, 2, 3, 0], [1, 3, 0, 2], [2, 0, 3, 1],
[2, 3, 0, 1], [2, 3, 1, 0], [3, 0, 1, 2], [3, 2, 0, 1], [3, 2, 1, 0]]
assert list(generate_derangements([0, 1, 2, 2])) == [
[2, 2, 0, 1], [2, 2, 1, 0]]
def test_necklaces():
def count(n, k, f):
return len(list(necklaces(n, k, f)))
m = []
for i in range(1, 8):
m.append((
i, count(i, 2, 0), count(i, 2, 1), count(i, 3, 1)))
assert Matrix(m) == Matrix([
[1, 2, 2, 3],
[2, 3, 3, 6],
[3, 4, 4, 10],
[4, 6, 6, 21],
[5, 8, 8, 39],
[6, 14, 13, 92],
[7, 20, 18, 198]])
def test_generate_oriented_forest():
assert list(generate_oriented_forest(5)) == [[0, 1, 2, 3, 4],
[0, 1, 2, 3, 3], [0, 1, 2, 3, 2], [0, 1, 2, 3, 1], [0, 1, 2, 3, 0],
[0, 1, 2, 2, 2], [0, 1, 2, 2, 1], [0, 1, 2, 2, 0], [0, 1, 2, 1, 2],
[0, 1, 2, 1, 1], [0, 1, 2, 1, 0], [0, 1, 2, 0, 1], [0, 1, 2, 0, 0],
[0, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 1, 1, 0, 1], [0, 1, 1, 0, 0],
[0, 1, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0]]
assert len(list(generate_oriented_forest(10))) == 1842
def test_unflatten():
r = list(range(10))
assert unflatten(r) == list(zip(r[::2], r[1::2]))
assert unflatten(r, 5) == [tuple(r[:5]), tuple(r[5:])]
raises(ValueError, lambda: unflatten(list(range(10)), 3))
raises(ValueError, lambda: unflatten(list(range(10)), -2))
def test_common_prefix_suffix():
assert common_prefix([], [1]) == []
assert common_prefix(list(range(3))) == [0, 1, 2]
assert common_prefix(list(range(3)), list(range(4))) == [0, 1, 2]
assert common_prefix([1, 2, 3], [1, 2, 5]) == [1, 2]
assert common_prefix([1, 2, 3], [1, 3, 5]) == [1]
assert common_suffix([], [1]) == []
assert common_suffix(list(range(3))) == [0, 1, 2]
assert common_suffix(list(range(3)), list(range(3))) == [0, 1, 2]
assert common_suffix(list(range(3)), list(range(4))) == []
assert common_suffix([1, 2, 3], [9, 2, 3]) == [2, 3]
assert common_suffix([1, 2, 3], [9, 7, 3]) == [3]
def test_minlex():
assert minlex([1, 2, 0]) == (0, 1, 2)
assert minlex((1, 2, 0)) == (0, 1, 2)
assert minlex((1, 0, 2)) == (0, 2, 1)
assert minlex((1, 0, 2), directed=False) == (0, 1, 2)
assert minlex('aba') == 'aab'
def test_ordered():
assert list(ordered((x, y), hash, default=False)) in [[x, y], [y, x]]
assert list(ordered((x, y), hash, default=False)) == \
list(ordered((y, x), hash, default=False))
assert list(ordered((x, y))) == [x, y]
seq, keys = [[[1, 2, 1], [0, 3, 1], [1, 1, 3], [2], [1]],
(lambda x: len(x), lambda x: sum(x))]
assert list(ordered(seq, keys, default=False, warn=False)) == \
[[1], [2], [1, 2, 1], [0, 3, 1], [1, 1, 3]]
raises(ValueError, lambda:
list(ordered(seq, keys, default=False, warn=True)))
def test_runs():
assert runs([]) == []
assert runs([1]) == [[1]]
assert runs([1, 1]) == [[1], [1]]
assert runs([1, 1, 2]) == [[1], [1, 2]]
assert runs([1, 2, 1]) == [[1, 2], [1]]
assert runs([2, 1, 1]) == [[2], [1], [1]]
from operator import lt
assert runs([2, 1, 1], lt) == [[2, 1], [1]]
def test_reshape():
seq = list(range(1, 9))
assert reshape(seq, [4]) == \
[[1, 2, 3, 4], [5, 6, 7, 8]]
assert reshape(seq, (4,)) == \
[(1, 2, 3, 4), (5, 6, 7, 8)]
assert reshape(seq, (2, 2)) == \
[(1, 2, 3, 4), (5, 6, 7, 8)]
assert reshape(seq, (2, [2])) == \
[(1, 2, [3, 4]), (5, 6, [7, 8])]
assert reshape(seq, ((2,), [2])) == \
[((1, 2), [3, 4]), ((5, 6), [7, 8])]
assert reshape(seq, (1, [2], 1)) == \
[(1, [2, 3], 4), (5, [6, 7], 8)]
assert reshape(tuple(seq), ([[1], 1, (2,)],)) == \
(([[1], 2, (3, 4)],), ([[5], 6, (7, 8)],))
assert reshape(tuple(seq), ([1], 1, (2,))) == \
(([1], 2, (3, 4)), ([5], 6, (7, 8)))
assert reshape(list(range(12)), [2, [3], set([2]), (1, (3,), 1)]) == \
[[0, 1, [2, 3, 4], set([5, 6]), (7, (8, 9, 10), 11)]]
def test_uniq():
assert list(uniq(p.copy() for p in partitions(4))) == \
[{4: 1}, {1: 1, 3: 1}, {2: 2}, {1: 2, 2: 1}, {1: 4}]
assert list(uniq(x % 2 for x in range(5))) == [0, 1]
assert list(uniq('a')) == ['a']
assert list(uniq('ababc')) == list('abc')
assert list(uniq([[1], [2, 1], [1]])) == [[1], [2, 1]]
assert list(uniq(permutations(i for i in [[1], 2, 2]))) == \
[([1], 2, 2), (2, [1], 2), (2, 2, [1])]
assert list(uniq([2, 3, 2, 4, [2], [1], [2], [3], [1]])) == \
[2, 3, 4, [2], [1], [3]]
def test_kbins():
assert len(list(kbins('1123', 2, ordered=1))) == 24
assert len(list(kbins('1123', 2, ordered=11))) == 36
assert len(list(kbins('1123', 2, ordered=10))) == 10
assert len(list(kbins('1123', 2, ordered=0))) == 5
assert len(list(kbins('1123', 2, ordered=None))) == 3
def test():
for ordered in [None, 0, 1, 10, 11]:
print('ordered =', ordered)
for p in kbins([0, 0, 1], 2, ordered=ordered):
print(' ', p)
assert capture(lambda : test()) == dedent('''\
ordered = None
[[0], [0, 1]]
[[0, 0], [1]]
ordered = 0
[[0, 0], [1]]
[[0, 1], [0]]
ordered = 1
[[0], [0, 1]]
[[0], [1, 0]]
[[1], [0, 0]]
ordered = 10
[[0, 0], [1]]
[[1], [0, 0]]
[[0, 1], [0]]
[[0], [0, 1]]
ordered = 11
[[0], [0, 1]]
[[0, 0], [1]]
[[0], [1, 0]]
[[0, 1], [0]]
[[1], [0, 0]]
[[1, 0], [0]]\n''')
def test():
for ordered in [None, 0, 1, 10, 11]:
print('ordered =', ordered)
for p in kbins(list(range(3)), 2, ordered=ordered):
print(' ', p)
assert capture(lambda : test()) == dedent('''\
ordered = None
[[0], [1, 2]]
[[0, 1], [2]]
ordered = 0
[[0, 1], [2]]
[[0, 2], [1]]
[[0], [1, 2]]
ordered = 1
[[0], [1, 2]]
[[0], [2, 1]]
[[1], [0, 2]]
[[1], [2, 0]]
[[2], [0, 1]]
[[2], [1, 0]]
ordered = 10
[[0, 1], [2]]
[[2], [0, 1]]
[[0, 2], [1]]
[[1], [0, 2]]
[[0], [1, 2]]
[[1, 2], [0]]
ordered = 11
[[0], [1, 2]]
[[0, 1], [2]]
[[0], [2, 1]]
[[0, 2], [1]]
[[1], [0, 2]]
[[1, 0], [2]]
[[1], [2, 0]]
[[1, 2], [0]]
[[2], [0, 1]]
[[2, 0], [1]]
[[2], [1, 0]]
[[2, 1], [0]]\n''')
def test_has_dups():
assert has_dups(set()) is False
assert has_dups(list(range(3))) is False
assert has_dups([1, 2, 1]) is True
def test__partition():
assert _partition('abcde', [1, 0, 1, 2, 0]) == [
['b', 'e'], ['a', 'c'], ['d']]
assert _partition('abcde', [1, 0, 1, 2, 0], 3) == [
['b', 'e'], ['a', 'c'], ['d']]
output = (3, [1, 0, 1, 2, 0])
assert _partition('abcde', *output) == [['b', 'e'], ['a', 'c'], ['d']]
| hrashk/sympy | sympy/utilities/tests/test_iterables.py | Python | bsd-3-clause | 24,090 |
from pypom import Region
from selenium.webdriver.common.by import By
from base import Base
class Collections(Base):
"""Collections page."""
_item_locator = (By.CSS_SELECTOR, '.item')
def wait_for_page_to_load(self):
self.wait.until(lambda _: len(self.collections) > 0 and
self.collections[0].name)
return self
@property
def collections(self):
collections = self.find_elements(*self._item_locator)
return [self.Collection(self, el) for el in collections]
class Collection(Region):
"""Represents an individual collection."""
_name_locator = (By.CSS_SELECTOR, '.info > h3')
@property
def name(self):
return self.find_element(*self._name_locator).text
| harikishen/addons-server | tests/ui/pages/desktop/collections.py | Python | bsd-3-clause | 779 |
from __future__ import unicode_literals
from django.db import models
from modelcluster.fields import ParentalKey
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore import fields
from wagtail.wagtailadmin.edit_handlers import FieldPanel
from wagtail.wagtailadmin.edit_handlers import InlinePanel
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
class AboutPage(Page):
content = fields.RichTextField()
picture = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
content_panels = Page.content_panels + [
FieldPanel('content'),
ImageChooserPanel('picture'),
]
api_fields = ('content', 'picture',)
| abirafdirp/blog-wagtail | about/models.py | Python | bsd-3-clause | 785 |
import eqpy
import sympy
from eqpy._utils import raises
def test_constants():
assert eqpy.nums.Catalan is sympy.Catalan
assert eqpy.nums.E is sympy.E
assert eqpy.nums.EulerGamma is sympy.EulerGamma
assert eqpy.nums.GoldenRatio is sympy.GoldenRatio
assert eqpy.nums.I is sympy.I
assert eqpy.nums.nan is sympy.nan
assert eqpy.nums.oo is sympy.oo
assert eqpy.nums.pi is sympy.pi
assert eqpy.nums.zoo is sympy.zoo
def test_sympify():
eqpy.nums.x = '1/2'
assert eqpy.nums.x == sympy.S('1/2')
assert eqpy.nums('2/3') == sympy.S('2/3')
assert raises(sympy.SympifyError, lambda: eqpy.nums('1.2.3'))
def test_dunders():
eqpy.nums.__mydunder__ = '1/2'
assert eqpy.nums.__mydunder__ == '1/2'
| eriknw/eqpy | eqpy/tests/test_nums.py | Python | bsd-3-clause | 747 |
from contextlib import contextmanager
from datetime import datetime
from django import forms
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser
from django.core import validators
from django.db import models
from django.utils import translation
from django.utils.encoding import smart_unicode
from django.utils.functional import lazy
import commonware.log
import tower
from cache_nuggets.lib import memoize
from tower import ugettext as _
import amo
import amo.models
from amo.urlresolvers import reverse
from mkt.translations.fields import NoLinksField, save_signal
from mkt.translations.query import order_by_translation
log = commonware.log.getLogger('z.users')
class UserForeignKey(models.ForeignKey):
"""
A replacement for models.ForeignKey('users.UserProfile').
This field uses UserEmailField to make form fields key off the user's email
instead of the primary key id. We also hook up autocomplete automatically.
"""
def __init__(self, *args, **kw):
super(UserForeignKey, self).__init__(UserProfile, *args, **kw)
def value_from_object(self, obj):
return getattr(obj, self.name).email
def formfield(self, **kw):
defaults = {'form_class': UserEmailField}
defaults.update(kw)
return models.Field.formfield(self, **defaults)
class UserEmailField(forms.EmailField):
def clean(self, value):
if value in validators.EMPTY_VALUES:
raise forms.ValidationError(self.error_messages['required'])
try:
return UserProfile.objects.get(email=value)
except UserProfile.DoesNotExist:
raise forms.ValidationError(_('No user with that email.'))
def widget_attrs(self, widget):
lazy_reverse = lazy(reverse, str)
return {'class': 'email-autocomplete',
'data-src': lazy_reverse('users.ajax')}
AbstractBaseUser._meta.get_field('password').max_length = 255
class UserProfile(amo.models.OnChangeMixin, amo.models.ModelBase,
AbstractBaseUser):
USERNAME_FIELD = 'username'
username = models.CharField(max_length=255, default='', unique=True)
display_name = models.CharField(max_length=255, default='', null=True,
blank=True)
email = models.EmailField(unique=True, null=True)
averagerating = models.CharField(max_length=255, blank=True, null=True)
bio = NoLinksField(short=False)
confirmationcode = models.CharField(max_length=255, default='',
blank=True)
deleted = models.BooleanField(default=False)
display_collections = models.BooleanField(default=False)
display_collections_fav = models.BooleanField(default=False)
emailhidden = models.BooleanField(default=True)
homepage = models.URLField(max_length=255, blank=True, default='')
location = models.CharField(max_length=255, blank=True, default='')
notes = models.TextField(blank=True, null=True)
notifycompat = models.BooleanField(default=True)
notifyevents = models.BooleanField(default=True)
occupation = models.CharField(max_length=255, default='', blank=True)
# This is essentially a "has_picture" flag right now
picture_type = models.CharField(max_length=75, default='', blank=True)
resetcode = models.CharField(max_length=255, default='', blank=True)
resetcode_expires = models.DateTimeField(default=datetime.now, null=True,
blank=True)
read_dev_agreement = models.DateTimeField(null=True, blank=True)
last_login_ip = models.CharField(default='', max_length=45, editable=False)
last_login_attempt = models.DateTimeField(null=True, editable=False)
last_login_attempt_ip = models.CharField(default='', max_length=45,
editable=False)
failed_login_attempts = models.PositiveIntegerField(default=0,
editable=False)
source = models.PositiveIntegerField(default=amo.LOGIN_SOURCE_UNKNOWN,
editable=False, db_index=True)
is_verified = models.BooleanField(default=True)
region = models.CharField(max_length=11, null=True, blank=True,
editable=False)
lang = models.CharField(max_length=5, null=True, blank=True,
editable=False)
class Meta:
db_table = 'users'
def __init__(self, *args, **kw):
super(UserProfile, self).__init__(*args, **kw)
if self.username:
self.username = smart_unicode(self.username)
def __unicode__(self):
return u'%s: %s' % (self.id, self.display_name or self.username)
def save(self, force_insert=False, force_update=False, using=None, **kwargs):
# we have to fix stupid things that we defined poorly in remora
if not self.resetcode_expires:
self.resetcode_expires = datetime.now()
super(UserProfile, self).save(force_insert, force_update, using,
**kwargs)
@property
def is_superuser(self):
return self.groups.filter(rules='*:*').exists()
@property
def is_staff(self):
from mkt.access import acl
return acl.action_allowed_user(self, 'Admin', '%')
def has_perm(self, perm, obj=None):
return self.is_superuser
def has_module_perms(self, app_label):
return self.is_superuser
def get_backend(self):
return 'django_browserid.auth.BrowserIDBackend'
def set_backend(self, val):
pass
backend = property(get_backend, set_backend)
def is_anonymous(self):
return False
def get_url_path(self, src=None):
# See: bug 880767.
return '#'
def my_apps(self, n=8):
"""Returns n apps"""
qs = self.addons.filter(type=amo.ADDON_WEBAPP)
qs = order_by_translation(qs, 'name')
return qs[:n]
@amo.cached_property
def is_developer(self):
return self.addonuser_set.exists()
@property
def name(self):
return smart_unicode(self.display_name or self.username)
@amo.cached_property
def reviews(self):
"""All reviews that are not dev replies."""
qs = self._reviews_all.filter(reply_to=None)
# Force the query to occur immediately. Several
# reviews-related tests hang if this isn't done.
return qs
def anonymize(self):
log.info(u"User (%s: <%s>) is being anonymized." % (self, self.email))
self.email = None
self.password = "sha512$Anonymous$Password"
self.username = "Anonymous-%s" % self.id # Can't be null
self.display_name = None
self.homepage = ""
self.deleted = True
self.picture_type = ""
self.save()
def check_password(self, raw_password):
# BrowserID does not store a password.
return True
def log_login_attempt(self, successful):
"""Log a user's login attempt"""
self.last_login_attempt = datetime.now()
self.last_login_attempt_ip = commonware.log.get_remote_addr()
if successful:
log.debug(u"User (%s) logged in successfully" % self)
self.failed_login_attempts = 0
self.last_login_ip = commonware.log.get_remote_addr()
else:
log.debug(u"User (%s) failed to log in" % self)
if self.failed_login_attempts < 16777216:
self.failed_login_attempts += 1
self.save()
def purchase_ids(self):
"""
        I'm special-casing this because we use purchase_ids a lot on the site
        and cache-machine does not cache empty querysets.
        That means that when the site is first launched we hit the database
        with a lot of uncached empty queries.
        We could probably do this in a smarter fashion by making cache-machine
        cache empty queries on an as-needed basis.
"""
# Circular import
from mkt.prices.models import AddonPurchase
@memoize(prefix='users:purchase-ids')
def ids(pk):
return (AddonPurchase.objects.filter(user=pk)
.values_list('addon_id', flat=True)
.filter(type=amo.CONTRIB_PURCHASE)
.order_by('pk'))
return ids(self.pk)
@contextmanager
def activate_lang(self):
"""
Activate the language for the user. If none is set will go to the site
default which is en-US.
"""
lang = self.lang if self.lang else settings.LANGUAGE_CODE
old = translation.get_language()
tower.activate(lang)
yield
tower.activate(old)
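# Illustrative usage of UserProfile.activate_lang (not from the original source):
#   with user.activate_lang():
#       ...  # render or send text in the user's preferred language
# The previously active language is restored when the block exits.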
models.signals.pre_save.connect(save_signal, sender=UserProfile,
dispatch_uid='userprofile_translations')
class UserNotification(amo.models.ModelBase):
user = models.ForeignKey(UserProfile, related_name='notifications')
notification_id = models.IntegerField()
enabled = models.BooleanField(default=False)
class Meta:
db_table = 'users_notifications'
@staticmethod
def update_or_create(update={}, **kwargs):
rows = UserNotification.objects.filter(**kwargs).update(**update)
if not rows:
update.update(dict(**kwargs))
UserNotification.objects.create(**update)
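# Illustrative use of UserNotification.update_or_create (hypothetical values):
#   UserNotification.update_or_create(update={'enabled': True},
#                                     user=some_user, notification_id=3)
# Matching rows are updated in place; if none match, a new row is created.
# Note that the mutable default for ``update`` is shared between calls, so
# passing an explicit dict (as above) is the safer pattern.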
| andymckay/zamboni | mkt/users/models.py | Python | bsd-3-clause | 9,476 |
def extractVodkatranslationsCom(item):
'''
Parser for 'vodkatranslations.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Ordinary I and Extraordinary Them', 'Ordinary I and Extraordinary Them', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractVodkatranslationsCom.py | Python | bsd-3-clause | 671 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from aldryn_client import forms
class Form(forms.BaseForm):
plugin_module = forms.CharField('Plugin module name', initial='Generic')
plugin_name = forms.CharField('Plugin name', initial='Facebook Comments')
plugin_template = forms.CharField('Plugin Template', initial='djangocms_fbcomments/default.html')
app_id = forms.CharField('Facebook App ID', required=False)
def to_settings(self, data, settings):
settings['DJANGOCMS_FBCOMMENTS_PLUGIN_MODULE'] = data['plugin_module']
settings['DJANGOCMS_FBCOMMENTS_PLUGIN_NAME'] = data['plugin_name']
settings['DJANGOCMS_FBCOMMENTS_PLUGIN_TEMPLATE'] = data['plugin_template']
settings['DJANGOCMS_FBCOMMENTS_APP_ID'] = data['app_id']
return settings
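# Example outcome (illustrative): submitting the form with app_id='123' and the
# defaults above produces settings such as
#   DJANGOCMS_FBCOMMENTS_PLUGIN_NAME = 'Facebook Comments'
#   DJANGOCMS_FBCOMMENTS_APP_ID = '123'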
| mishbahr/djangocms-fbcomments | aldryn_config.py | Python | bsd-3-clause | 819 |
from django.conf.urls.defaults import *
from models import Entry, Tag
from django.views.generic.dates import ArchiveIndexView, DateDetailView
from django.views.generic import TemplateView
urlpatterns = patterns('',
url(r'^/?$', ArchiveIndexView.as_view(model=Entry, date_field="published_on"), name="news-main"),
# url(r'^(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/(?P<slug>[0-9A-Za-z-]+)/$', 'date_based.object_detail', dict(entry_dict, slug_field='slug', month_format='%m'),name="news-detail"),
url(r'^(?P<year>\d+)/(?P<month>[-\w]+)/(?P<day>\d+)/(?P<pk>\d+)/$',
DateDetailView.as_view(model=Entry, date_field="published_on"),
name="news_detail"),
url(r'^about/$', TemplateView.as_view(template_name='news/about.html'), name='news-about'),
)
| underbluewaters/marinemap | lingcod/news/urls.py | Python | bsd-3-clause | 788 |
import logging
from django.http import HttpResponse
from receiver.submitresponse import SubmitResponse
def duplicate_attachment(way_handled, additional_params):
    '''Return a custom HTTP response associated with the handling
of the xform. In this case, telling the sender that
they submitted a duplicate
'''
try:
# NOTE: this possibly shouldn't be a "200" code, but it is for
# now because it's not clear how JavaRosa will handle 202.
# see: http://code.dimagi.com/JavaRosa/wiki/ServerResponseFormat
response = SubmitResponse(status_code=200, or_status_code=2020,
or_status="Duplicate Submission.",
submit_id=way_handled.submission.id,
**additional_params)
return response.to_response()
except Exception, e:
logging.error("Problem in properly responding to instance data handling of %s" %
way_handled)
| icomms/wqmanager | apps/receiver/__init__.py | Python | bsd-3-clause | 1,031 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.test import TestCase
from models import Student, StudyGroup, Task, Lab, Subject, GroupSubject
class PortalTest(TestCase):
def setUp(self):
self.study_group1 = StudyGroup.objects.create(name="10А")
self.study_group2 = StudyGroup.objects.create(name="11Б")
self.subject1 = Subject.objects.create(name="Оптика")
self.subject2 = Subject.objects.create(name="Механика")
self.group_subject11 = GroupSubject.objects.create(
study_group=self.study_group1, subject=self.subject1
)
self.group_subject22 = GroupSubject.objects.create(
study_group=self.study_group2, subject=self.subject2
)
self.student1 = Student.objects.create_user(
username="ivan", email=None, password="123456", study_group=self.study_group1
)
self.student2 = Student.objects.create_user(
username="pavel", email=None, password="123456", study_group=self.study_group2
)
self.lab1 = Lab.objects.create(name="Кольца ньютона", subject=self.subject1)
self.lab2 = Lab.objects.create(name="Атвуд", subject=self.subject2)
def test_task_create(self):
has_error = False
try:
task = Task(student=self.student1, lab=self.lab1)
task.clean()
task.save()
except ValidationError:
has_error = True
self.assertFalse(has_error)
def test_task_create_double(self):
"""
        A validation error should be raised - we are trying to create two identical tasks.
:return:
"""
has_error = False
try:
task = Task(student=self.student1, lab=self.lab1)
task.clean()
task.save()
task = Task(student=self.student1, lab=self.lab1)
task.clean()
task.save()
except ValidationError:
has_error = True
self.assertTrue(has_error)
        # Check that there is only one task for this study group
subject = self.group_subject11.subject
study_group = self.group_subject11.study_group
task_count = Task.objects.filter(
lab__subject__pk=subject.id, student__study_group__pk=study_group.id
).count()
        self.assertEqual(task_count, 1)
| vinneyto/lab-portal | portal/test_models.py | Python | bsd-3-clause | 2,587 |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2007 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Windowing and user-interface events.
This module allows applications to create and display windows with an
OpenGL context. Windows can be created with a variety of border styles
or set fullscreen.
You can register event handlers for keyboard, mouse and window events.
For games and kiosks you can also restrict the input to your windows,
for example disabling users from switching away from the application
with certain key combinations or capturing and hiding the mouse.
Getting started
---------------
Call the Window constructor to create a new window::
from pyglet.window import Window
win = Window(width=640, height=480)
Attach your own event handlers::
@win.event
def on_key_press(symbol, modifiers):
# ... handle this event ...
Within your main run loop, you must call `Window.dispatch_events` regularly.
Windows are double-buffered by default, so you must call `Window.flip` to
update the display::
while not win.has_exit:
win.dispatch_events()
# ... drawing commands ...
win.flip()
Creating a game window
----------------------
Use `Window.set_exclusive_mouse` to hide the mouse cursor and receive relative
mouse movement events. Specify ``fullscreen=True`` as a keyword argument to
the `Window` constructor to render to the entire screen rather than opening a
window::
win = Window(fullscreen=True)
    win.set_exclusive_mouse()
Working with multiple windows
-----------------------------
You can open any number of windows and render to them individually. Each
window must have the event handlers set on it that you are interested in
(i.e., each window will have its own mouse event handler).
You must call `Window.dispatch_events` for each window. Before rendering
to a window, you must call `Window.switch_to` to set the active GL context.
Here is an example run loop for a list of windows::
windows = # list of Window instances
while windows:
for win in windows:
win.dispatch_events()
if win.has_exit:
win.close()
windows = [w for w in windows if not w.has_exit]
for win in windows:
win.switch_to()
# ... drawing commands for this window ...
win.flip()
Working with multiple screens
-----------------------------
By default, fullscreen windows are opened on the primary display (typically
set by the user in their operating system settings). You can retrieve a list
of attached screens and select one manually if you prefer. This is useful for
opening a fullscreen window on each screen::
display = window.get_platform().get_default_display()
screens = display.get_screens()
windows = []
for screen in screens:
windows.append(window.Window(fullscreen=True, screen=screen))
Specifying a screen has no effect if the window is not fullscreen.
Specifying the OpenGL context properties
----------------------------------------
Each window has its own context which is created when the window is created.
You can specify the properties of the context before it is created
by creating a "template" configuration::
from pyglet import gl
# Create template config
config = gl.Config()
config.stencil_size = 8
config.aux_buffers = 4
# Create a window using this config
win = window.Window(config=config)
To determine if a given configuration is supported, query the screen (see
above, "Working with multiple screens")::
configs = screen.get_matching_configs(config)
if not configs:
# ... config is not supported
else:
win = window.Window(config=configs[0])
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: __init__.py 1195 2007-08-24 09:38:40Z Alex.Holkner $'
import pprint
import sys
from pyglet import gl
from pyglet.gl import gl_info
from pyglet.event import EventDispatcher
from pyglet.window.event import WindowExitHandler
import pyglet.window.key
class WindowException(Exception):
'''The root exception for all window-related errors.'''
pass
class NoSuchDisplayException(WindowException):
'''An exception indicating the requested display is not available.'''
pass
class NoSuchConfigException(WindowException):
'''An exception indicating the requested configuration is not
available.'''
pass
class MouseCursorException(WindowException):
'''The root exception for all mouse cursor-related errors.'''
pass
class Platform(object):
'''Operating-system-level functionality.
The platform instance can only be obtained with `get_platform`. Use
the platform to obtain a `Display` instance.
'''
def get_display(self, name):
'''Get a display device by name.
This is meaningful only under X11, where the `name` is a
string including the host name and display number; for example
``"localhost:1"``.
On platforms other than X11, `name` is ignored and the default
        display is returned. pyglet does not support multiple
video devices on Windows or OS X. If more than one device is
attached, they will appear as a single virtual device comprising
all the attached screens.
:Parameters:
`name` : str
The name of the display to connect to.
:rtype: `Display`
'''
return get_default_display()
def get_default_display(self):
'''Get the default display device.
:rtype: `Display`
'''
raise NotImplementedError('abstract')
class Display(object):
'''A display device supporting one or more screens.
Use `Platform.get_display` or `Platform.get_default_display` to obtain
an instance of this class. Use a display to obtain `Screen` instances.
'''
def __init__(self):
self._windows = []
def get_screens(self):
'''Get the available screens.
A typical multi-monitor workstation comprises one `Display` with
multiple `Screen` s. This method returns a list of screens which
can be enumerated to select one for full-screen display.
For the purposes of creating an OpenGL config, the default screen
will suffice.
:rtype: list of `Screen`
'''
raise NotImplementedError('abstract')
def get_default_screen(self):
'''Get the default screen as specified by the user's operating system
preferences.
:rtype: `Screen`
'''
return self.get_screens()[0]
def get_windows(self):
'''Get the windows currently attached to this display.
:rtype: sequence of `Window`
'''
return self._windows
class Screen(object):
'''A virtual monitor that supports fullscreen windows.
Screens typically map onto a physical display such as a
monitor, television or projector. Selecting a screen for a window
has no effect unless the window is made fullscreen, in which case
the window will fill only that particular virtual screen.
The `width` and `height` attributes of a screen give the current
resolution of the screen. The `x` and `y` attributes give the global
location of the top-left corner of the screen. This is useful for
    determining if screens are arranged above or next to one another.
You cannot always rely on the origin to give the placement of monitors.
For example, an X server with two displays without Xinerama enabled
will present two logically separate screens with no relation to each
other.
Use `Display.get_screens` or `Display.get_default_screen` to obtain an
instance of this class.
:Ivariables:
`x` : int
Left edge of the screen on the virtual desktop.
`y` : int
Top edge of the screen on the virtual desktop.
`width` : int
Width of the screen, in pixels.
`height` : int
Height of the screen, in pixels.
'''
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
def __repr__(self):
return '%s(x=%d, y=%d, width=%d, height=%d)' % \
(self.__class__.__name__, self.x, self.y, self.width, self.height)
def get_best_config(self, template=None):
'''Get the best available GL config.
Any required attributes can be specified in `template`. If
no configuration matches the template, `NoSuchConfigException` will
be raised.
:Parameters:
`template` : `pyglet.gl.Config`
A configuration with desired attributes filled in.
:rtype: `pyglet.gl.Config`
:return: A configuration supported by the platform that best
fulfils the needs described by the template.
'''
if template is None:
template = gl.Config()
configs = self.get_matching_configs(template)
if not configs:
raise NoSuchConfigException()
return configs[0]
def get_matching_configs(self, template):
'''Get a list of configs that match a specification.
Any attributes specified in `template` will have values equal
to or greater in each returned config. If no configs satisfy
the template, an empty list is returned.
:Parameters:
`template` : `pyglet.gl.Config`
A configuration with desired attributes filled in.
:rtype: list of `pyglet.gl.Config`
:return: A list of matching configs.
'''
raise NotImplementedError('abstract')
class MouseCursor(object):
'''An abstract mouse cursor.'''
#: Indicates if the cursor is drawn using OpenGL. This is True
#: for all mouse cursors except system cursors.
drawable = True
def draw(self, x, y):
'''Abstract render method.
The cursor should be drawn with the "hot" spot at the given
coordinates. The projection is set to the pyglet default (i.e.,
        orthographic in window-space); however, no other aspects of the
state can be assumed.
:Parameters:
`x` : int
X coordinate of the mouse pointer's hot spot.
`y` : int
Y coordinate of the mouse pointer's hot spot.
'''
raise NotImplementedError('abstract')
class DefaultMouseCursor(MouseCursor):
'''The default mouse cursor used by the operating system.'''
drawable = False
class ImageMouseCursor(MouseCursor):
'''A user-defined mouse cursor created from an image.
Use this class to create your own mouse cursors and assign them
to windows. There are no constraints on the image size or format.
'''
drawable = True
def __init__(self, image, hot_x, hot_y):
'''Create a mouse cursor from an image.
:Parameters:
`image` : `pyglet.image.AbstractImage`
Image to use for the mouse cursor. It must have a
valid `texture` attribute.
`hot_x` : int
X coordinate of the "hot" spot in the image.
`hot_y` : int
Y coordinate of the "hot" spot in the image, measured
from the bottom.
'''
self.texture = image.texture
self.hot_x = hot_x
self.hot_y = hot_y
def draw(self, x, y):
gl.glPushAttrib(gl.GL_ENABLE_BIT)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
self.texture.blit(x - self.hot_x, y - self.hot_y, 0)
gl.glPopAttrib()
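# Illustrative usage of ImageMouseCursor (a sketch; 'cursor.png' is a
# hypothetical file and `window` an already-created Window instance):
#
#     import pyglet.image
#     image = pyglet.image.load('cursor.png')
#     cursor = ImageMouseCursor(image, hot_x=0, hot_y=image.height)
#     window.set_mouse_cursor(cursor)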
class BaseWindow(EventDispatcher, WindowExitHandler):
'''Platform-independent application window.
A window is a "heavyweight" object occupying operating system resources.
The "client" or "content" area of a window is filled entirely with
an OpenGL viewport. Applications have no access to operating system
widgets or controls; all rendering must be done via OpenGL.
Windows may appear as floating regions or can be set to fill an entire
screen (fullscreen). When floating, windows may appear borderless or
decorated with a platform-specific frame (including, for example, the
title bar, minimize and close buttons, resize handles, and so on).
While it is possible to set the location of a window, it is recommended
that applications allow the platform to place it according to local
conventions. This will ensure it is not obscured by other windows,
and appears on an appropriate screen for the user.
To render into a window, you must first call `switch_to`, to make
it the current OpenGL context. If you use only one window in the
application, there is no need to do this.
'''
#: The default window style.
WINDOW_STYLE_DEFAULT = None
#: The window style for pop-up dialogs.
WINDOW_STYLE_DIALOG = 'dialog'
#: The window style for tool windows.
WINDOW_STYLE_TOOL = 'tool'
#: A window style without any decoration.
WINDOW_STYLE_BORDERLESS = 'borderless'
#: The default mouse cursor.
CURSOR_DEFAULT = None
#: A crosshair mouse cursor.
CURSOR_CROSSHAIR = 'crosshair'
#: A pointing hand mouse cursor.
CURSOR_HAND = 'hand'
#: A "help" mouse cursor; typically a question mark and an arrow.
CURSOR_HELP = 'help'
#: A mouse cursor indicating that the selected operation is not permitted.
CURSOR_NO = 'no'
#: A mouse cursor indicating the element can be resized.
CURSOR_SIZE = 'size'
#: A mouse cursor indicating the element can be resized from the top
#: border.
CURSOR_SIZE_UP = 'size_up'
#: A mouse cursor indicating the element can be resized from the
#: upper-right corner.
CURSOR_SIZE_UP_RIGHT = 'size_up_right'
#: A mouse cursor indicating the element can be resized from the right
#: border.
CURSOR_SIZE_RIGHT = 'size_right'
#: A mouse cursor indicating the element can be resized from the lower-right
#: corner.
CURSOR_SIZE_DOWN_RIGHT = 'size_down_right'
#: A mouse cursor indicating the element can be resized from the bottom
#: border.
CURSOR_SIZE_DOWN = 'size_down'
#: A mouse cursor indicating the element can be resized from the lower-left
#: corner.
CURSOR_SIZE_DOWN_LEFT = 'size_down_left'
#: A mouse cursor indicating the element can be resized from the left
#: border.
CURSOR_SIZE_LEFT = 'size_left'
#: A mouse cursor indicating the element can be resized from the upper-left
#: corner.
CURSOR_SIZE_UP_LEFT = 'size_up_left'
#: A mouse cursor indicating the element can be resized vertically.
CURSOR_SIZE_UP_DOWN = 'size_up_down'
#: A mouse cursor indicating the element can be resized horizontally.
CURSOR_SIZE_LEFT_RIGHT = 'size_left_right'
#: A text input mouse cursor (I-beam).
CURSOR_TEXT = 'text'
#: A "wait" mouse cursor; typically an hourglass or watch.
CURSOR_WAIT = 'wait'
#: The "wait" mouse cursor combined with an arrow.
CURSOR_WAIT_ARROW = 'wait_arrow'
# Instance variables accessible only via properties
_width = None
_height = None
_caption = None
_resizable = False
_style = WINDOW_STYLE_DEFAULT
_fullscreen = False
_visible = False
_vsync = False
_screen = None
_config = None
_context = None
# Used to restore window size and position after fullscreen
_windowed_size = None
_windowed_location = None
# Subclasses should update these after relevant events
_mouse_cursor = DefaultMouseCursor()
_mouse_x = 0
_mouse_y = 0
_mouse_visible = True
_mouse_exclusive = False
_mouse_in_window = True
_event_queue = None
_allow_dispatch_event = False # controlled by dispatch_events stack frame
def __init__(self,
width=640,
height=480,
caption=None,
resizable=False,
style=WINDOW_STYLE_DEFAULT,
fullscreen=False,
visible=True,
vsync=True,
display=None,
screen=None,
config=None,
context=None):
'''Create a window.
All parameters are optional, and reasonable defaults are assumed
where they are not specified.
The `display`, `screen`, `config` and `context` parameters form
a hierarchy of control: there is no need to specify more than
one of these. For example, if you specify `screen` the `display`
will be inferred, and a default `config` and `context` will be
created.
`config` is a special case; it can be a template created by the
user specifying the attributes desired, or it can be a complete
`config` as returned from `Screen.get_matching_configs` or similar.
The context will be active as soon as the window is created, as if
`switch_to` was just called.
:Parameters:
`width` : int
Width of the window, in pixels. Ignored if `fullscreen`
is True. Defaults to 640.
`height` : int
Height of the window, in pixels. Ignored if `fullscreen`
is True. Defaults to 480.
`caption` : str or unicode
Initial caption (title) of the window. Defaults to
``sys.argv[0]``.
`resizable` : bool
If True, the window will be resizable. Defaults to False.
`style` : int
One of the ``WINDOW_STYLE_*`` constants specifying the
border style of the window.
`fullscreen` : bool
If True, the window will cover the entire screen rather
than floating. Defaults to False.
`visible` : bool
Determines if the window is visible immediately after
creation. Defaults to True. Set this to False if you
would like to change attributes of the window before
having it appear to the user.
`vsync` : bool
If True, buffer flips are synchronised to the primary screen's
vertical retrace, eliminating flicker.
`display` : `Display`
The display device to use. Useful only under X11.
`screen` : `Screen`
The screen to use, if in fullscreen.
`config` : `pyglet.gl.Config`
Either a template from which to create a complete config,
or a complete config.
`context` : `pyglet.gl.Context`
The context to attach to this window. The context must
not already be attached to another window.
'''
EventDispatcher.__init__(self)
self._event_queue = []
if not display:
display = get_platform().get_default_display()
if not screen:
screen = display.get_default_screen()
if not config:
for template_config in [
gl.Config(double_buffer=True, depth_size=24),
gl.Config(double_buffer=True, depth_size=16)]:
try:
config = screen.get_best_config(template_config)
break
except NoSuchConfigException:
pass
if not config:
raise NoSuchConfigException('No standard config is available.')
if not config.is_complete():
config = screen.get_best_config(config)
if not context:
context = config.create_context(gl.get_current_context())
if fullscreen:
self._windowed_size = width, height
width = screen.width
height = screen.height
self._width = width
self._height = height
self._resizable = resizable
self._fullscreen = fullscreen
self._style = style
self._vsync = vsync
# Set these in reverse order to above, to ensure we get user
# preference
self._context = context
self._config = self._context.config
self._screen = self._config.screen
self._display = self._screen.display
if caption is None:
caption = sys.argv[0]
self._caption = caption
display._windows.append(self)
self._create()
self.switch_to()
if visible:
self.set_visible(True)
self.activate()
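    # Illustrative construction (a sketch): only a config template is given;
    # the display, screen and context are then inferred as described in the
    # docstring above.  `Window` refers to the platform subclass selected at
    # the bottom of this module.
    #
    #     template = gl.Config(double_buffer=True, depth_size=24)
    #     win = Window(width=800, height=600, caption='Example',
    #                  config=template)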
def _create(self):
raise NotImplementedError('abstract')
def _recreate(self, changes):
'''Recreate the window with current attributes.
:Parameters:
`changes` : list of str
List of attribute names that were changed since the last
`_create` or `_recreate`. For example, ``['fullscreen']``
is given if the window is to be toggled to or from fullscreen.
'''
raise NotImplementedError('abstract')
def set_fullscreen(self, fullscreen=True, screen=None):
'''Toggle to or from fullscreen.
After toggling fullscreen, the GL context should have retained its
state and objects, however the buffers will need to be cleared and
redrawn.
:Parameters:
`fullscreen` : bool
True if the window should be made fullscreen, False if it
should be windowed.
`screen` : Screen
If not None and fullscreen is True, the window is moved to the
given screen. The screen must belong to the same display as
the window.
'''
if fullscreen == self._fullscreen and screen is None:
return
if not self._fullscreen:
# Save windowed size
self._windowed_size = self.get_size()
self._windowed_location = self.get_location()
if fullscreen and screen is not None:
assert screen.display is self.display
self._screen = screen
self._fullscreen = fullscreen
if self._fullscreen:
self._width = self.screen.width
self._height = self.screen.height
else:
self._width, self._height = self._windowed_size
self._recreate(['fullscreen'])
if not self._fullscreen and self._windowed_location:
# Restore windowed location -- no effect on OS X because of
# deferred recreate. Move into platform _create? XXX
self.set_location(*self._windowed_location)
def on_resize(self, width, height):
'''A default resize event handler.
This default handler updates the GL viewport to cover the entire
        window and sets the ``GL_PROJECTION`` matrix to be orthogonal in
window space. The bottom-left corner is (0, 0) and the top-right
corner is the width and height of the window in pixels.
Override this event handler with your own to create another
projection, for example in perspective.
'''
self.switch_to()
gl.glViewport(0, 0, width, height)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
gl.glOrtho(0, width, 0, height, -1, 1)
gl.glMatrixMode(gl.GL_MODELVIEW)
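    # Illustrative override (a sketch): a subclass replacing the default
    # orthographic projection with a perspective projection.  It is assumed
    # here that pyglet.gl also exports the GLU functions (gluPerspective).
    #
    #     class PerspectiveWindow(Window):
    #         def on_resize(self, width, height):
    #             self.switch_to()
    #             gl.glViewport(0, 0, width, height)
    #             gl.glMatrixMode(gl.GL_PROJECTION)
    #             gl.glLoadIdentity()
    #             gl.gluPerspective(60.0, width / float(height), 0.1, 1000.0)
    #             gl.glMatrixMode(gl.GL_MODELVIEW)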
def close(self):
'''Close the window.
Windows are closed automatically when the process exits, so this
method need only be called when multiple windows or console input
are being used.
After closing the window, the GL context will be invalid. The
window instance cannot be reused once closed (see also `set_visible`).
'''
self._display._windows.remove(self)
self._context.destroy()
self._config = None
self._context = None
def draw_mouse_cursor(self):
'''Draw the custom mouse cursor.
If the current mouse cursor has ``drawable`` set, this method
is called before the buffers are flipped to render it.
This method always leaves the ``GL_MODELVIEW`` matrix as current,
regardless of what it was set to previously. No other GL state
is affected.
There is little need to override this method; instead, subclass
``MouseCursor`` and provide your own ``draw`` method.
'''
# Draw mouse cursor if set and visible.
# XXX leaves state in modelview regardless of starting state
if (self._mouse_cursor.drawable and
self._mouse_visible and
self._mouse_in_window):
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glPushMatrix()
gl.glLoadIdentity()
gl.glOrtho(0, self.width, 0, self.height, -1, 1)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glPushMatrix()
gl.glLoadIdentity()
self._mouse_cursor.draw(self._mouse_x, self._mouse_y)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glPopMatrix()
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glPopMatrix()
# Properties provide read-only access to instance variables. Use
# set_* methods to change them if applicable.
caption = property(lambda self: self._caption,
doc='''The window caption (title). Read-only.
:type: str
''')
resizable = property(lambda self: self._resizable,
doc='''True if the window is resizeable. Read-only.
:type: bool
''')
style = property(lambda self: self._style,
doc='''The window style; one of the ``WINDOW_STYLE_*`` constants.
Read-only.
:type: int
''')
fullscreen = property(lambda self: self._fullscreen,
doc='''True if the window is currently fullscreen. Read-only.
:type: bool
''')
visible = property(lambda self: self._visible,
doc='''True if the window is currently visible. Read-only.
:type: bool
''')
vsync = property(lambda self: self._vsync,
doc='''True if buffer flips are synchronised to the screen's vertical
retrace. Read-only.
:type: bool
''')
display = property(lambda self: self._display,
doc='''The display this window belongs to. Read-only.
:type: `Display`
''')
screen = property(lambda self: self._screen,
doc='''The screen this window is fullscreen in. Read-only.
:type: `Screen`
''')
config = property(lambda self: self._config,
doc='''A GL config describing the context of this window. Read-only.
:type: `pyglet.gl.Config`
''')
context = property(lambda self: self._context,
doc='''The OpenGL context attached to this window. Read-only.
:type: `pyglet.gl.Context`
''')
# These are the only properties that can be set
width = property(lambda self: self.get_size()[0],
lambda self, width: self.set_size(width, self.height),
doc='''The width of the window, in pixels. Read-write.
:type: int
''')
height = property(lambda self: self.get_size()[1],
lambda self, height: self.set_size(self.width, height),
doc='''The height of the window, in pixels. Read-write.
:type: int
''')
def set_caption(self, caption):
'''Set the window's caption.
The caption appears in the titlebar of the window, if it has one,
and in the taskbar on Windows and many X11 window managers.
:Parameters:
`caption` : str or unicode
The caption to set.
'''
raise NotImplementedError('abstract')
def set_minimum_size(self, width, height):
'''Set the minimum size of the window.
Once set, the user will not be able to resize the window smaller
than the given dimensions. There is no way to remove the
minimum size constraint on a window (but you could set it to 0,0).
The behaviour is undefined if the minimum size is set larger than
the current size of the window.
The window size does not include the border or title bar.
:Parameters:
`width` : int
Minimum width of the window, in pixels.
`height` : int
Minimum height of the window, in pixels.
'''
raise NotImplementedError('abstract')
def set_maximum_size(self, width, height):
'''Set the maximum size of the window.
Once set, the user will not be able to resize the window larger
than the given dimensions. There is no way to remove the
maximum size constraint on a window (but you could set it to a large
value).
The behaviour is undefined if the maximum size is set smaller than
the current size of the window.
The window size does not include the border or title bar.
:Parameters:
`width` : int
Maximum width of the window, in pixels.
`height` : int
Maximum height of the window, in pixels.
'''
raise NotImplementedError('abstract')
def set_size(self, width, height):
'''Resize the window.
The behaviour is undefined if the window is not resizable, or if
it is currently fullscreen.
The window size does not include the border or title bar.
:Parameters:
`width` : int
New width of the window, in pixels.
`height` : int
New height of the window, in pixels.
'''
raise NotImplementedError('abstract')
def get_size(self):
'''Return the current size of the window.
The window size does not include the border or title bar.
:rtype: (int, int)
:return: The width and height of the window, in pixels.
'''
raise NotImplementedError('abstract')
def set_location(self, x, y):
'''Set the position of the window.
:Parameters:
`x` : int
Distance of the left edge of the window from the left edge
of the virtual desktop, in pixels.
`y` : int
Distance of the top edge of the window from the top edge of
the virtual desktop, in pixels.
'''
raise NotImplementedError('abstract')
def get_location(self):
'''Return the current position of the window.
:rtype: (int, int)
:return: The distances of the left and top edges from their respective
edges on the virtual desktop, in pixels.
'''
raise NotImplementedError('abstract')
def activate(self):
'''Attempt to restore keyboard focus to the window.
Depending on the window manager or operating system, this may not
be successful. For example, on Windows XP an application is not
allowed to "steal" focus from another application. Instead, the
window's taskbar icon will flash, indicating it requires attention.
'''
raise NotImplementedError('abstract')
def set_visible(self, visible=True):
'''Show or hide the window.
:Parameters:
`visible` : bool
If True, the window will be shown; otherwise it will be
hidden.
'''
raise NotImplementedError('abstract')
def minimize(self):
'''Minimize the window.
'''
raise NotImplementedError('abstract')
def maximize(self):
'''Maximize the window.
The behaviour of this method is somewhat dependent on the user's
display setup. On a multi-monitor system, the window may maximize
to either a single screen or the entire virtual desktop.
'''
raise NotImplementedError('abstract')
def set_vsync(self, vsync):
'''Enable or disable vertical sync control.
When enabled, this option ensures flips from the back to the front
buffer are performed only during the vertical retrace period of the
primary display. This can prevent "tearing" or flickering when
the buffer is updated in the middle of a video scan.
        Note that LCD monitors have an analogous time in which they are not
reading from the video buffer; while it does not correspond to
a vertical retrace it has the same effect.
With multi-monitor systems the secondary monitor cannot be
synchronised to, so tearing and flicker cannot be avoided when the
window is positioned outside of the primary display. In this case
it may be advisable to forcibly reduce the framerate (for example,
using `pyglet.clock.set_fps_limit`).
:Parameters:
`vsync` : bool
If True, vsync is enabled, otherwise it is disabled.
'''
raise NotImplementedError('abstract')
def set_mouse_visible(self, visible=True):
'''Show or hide the mouse cursor.
The mouse cursor will only be hidden while it is positioned within
this window. Mouse events will still be processed as usual.
:Parameters:
`visible` : bool
If True, the mouse cursor will be visible, otherwise it
will be hidden.
'''
self._mouse_visible = visible
self.set_mouse_platform_visible()
def set_mouse_platform_visible(self, platform_visible=None):
'''Set the platform-drawn mouse cursor visibility. This is called
automatically after changing the mouse cursor or exclusive mode.
Applications should not normally need to call this method, see
`set_mouse_visible` instead.
:Parameters:
`platform_visible` : bool or None
If None, sets platform visibility to the required visibility
for the current exclusive mode and cursor type. Otherwise,
a bool value will override and force a visibility.
'''
raise NotImplementedError()
def set_mouse_cursor(self, cursor=None):
'''Change the appearance of the mouse cursor.
The appearance of the mouse cursor is only changed while it is
within this window.
:Parameters:
`cursor` : `MouseCursor`
The cursor to set, or None to restore the default cursor.
'''
if cursor is None:
cursor = DefaultMouseCursor()
self._mouse_cursor = cursor
self.set_mouse_platform_visible()
def set_exclusive_mouse(self, exclusive=True):
'''Hide the mouse cursor and direct all mouse events to this
window.
When enabled, this feature prevents the mouse leaving the window. It
is useful for certain styles of games that require complete control of
the mouse. The position of the mouse as reported in subsequent events
is meaningless when exclusive mouse is enabled; you should only use
the relative motion parameters ``dx`` and ``dy``.
:Parameters:
`exclusive` : bool
If True, exclusive mouse is enabled, otherwise it is disabled.
'''
raise NotImplementedError('abstract')
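    # Illustrative usage (a sketch): with exclusive mouse enabled only the
    # relative motion is meaningful, so a handler reads ``dx``/``dy``.
    # ``heading`` and ``pitch`` are hypothetical view-state attributes.
    #
    #     class FPSWindow(Window):
    #         def on_mouse_motion(self, x, y, dx, dy):
    #             self.heading += dx * 0.15
    #             self.pitch += dy * 0.15
    #
    #     win = FPSWindow()
    #     win.set_exclusive_mouse(True)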
def set_exclusive_keyboard(self, exclusive=True):
'''Prevent the user from switching away from this window using
keyboard accelerators.
When enabled, this feature disables certain operating-system specific
key combinations such as Alt+Tab (Command+Tab on OS X). This can be
        useful in certain kiosk applications, but it should be avoided in general
applications or games.
:Parameters:
`exclusive` : bool
If True, exclusive keyboard is enabled, otherwise it is
disabled.
'''
raise NotImplementedError('abstract')
def get_system_mouse_cursor(self, name):
'''Obtain a system mouse cursor.
Use `set_mouse_cursor` to make the cursor returned by this method
active. The names accepted by this method are the ``CURSOR_*``
constants defined on this class.
:Parameters:
`name` : str
Name describing the mouse cursor to return. For example,
``CURSOR_WAIT``, ``CURSOR_HELP``, etc.
:rtype: `MouseCursor`
:return: A mouse cursor which can be used with `set_mouse_cursor`.
'''
raise NotImplementedError()
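    # Illustrative usage (a sketch): show the system "wait" cursor while a
    # long operation runs, then restore the default cursor.
    #
    #     win.set_mouse_cursor(win.get_system_mouse_cursor(win.CURSOR_WAIT))
    #     # ...long operation...
    #     win.set_mouse_cursor(None)  # back to the default cursor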
def set_icon(self, *images):
'''Set the window icon.
If multiple images are provided, one with an appropriate size
will be selected (if the correct size is not provided, the image
will be scaled).
Useful sizes to provide are 16x16, 32x32, 64x64 (Mac only) and
128x128 (Mac only).
:Parameters:
`images` : sequence of `pyglet.image.AbstractImage`
List of images to use for the window icon.
'''
pass
def clear(self):
'''Clear the window.
This is a convenience method for clearing the color and depth
buffer. The window must be the active context (see `switch_to`).
'''
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
def dispatch_event(self, *args):
if self._allow_dispatch_event:
EventDispatcher.dispatch_event(self, *args)
else:
self._event_queue.append(args)
def dispatch_events(self):
'''Process the operating system event queue and call attached
event handlers.
'''
raise NotImplementedError('abstract')
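    # Illustrative main loop (a sketch): poll events, draw, then flip the
    # buffers each frame.  ``has_exit`` comes from WindowExitHandler;
    # ``flip`` is assumed to be provided by the platform window subclass.
    #
    #     win = Window()
    #     while not win.has_exit:
    #         win.dispatch_events()
    #         win.clear()
    #         # ...draw the frame...
    #         win.flip()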
# If documenting, show the event methods. Otherwise, leave them out
# as they are not really methods.
if hasattr(sys, 'is_epydoc') and sys.is_epydoc:
def on_key_press(symbol, modifiers):
'''A key on the keyboard was pressed (and held down).
:Parameters:
`symbol` : int
The key symbol pressed.
`modifiers` : int
Bitwise combination of the key modifiers active.
:event:
'''
def on_key_release(symbol, modifiers):
'''A key on the keyboard was released.
:Parameters:
`symbol` : int
The key symbol pressed.
`modifiers` : int
Bitwise combination of the key modifiers active.
:event:
'''
def on_text(text):
'''The user input some text.
Typically this is called after `on_key_press` and before
`on_key_release`, but may also be called multiple times if the key
is held down (key repeating); or called without key presses if
another input method was used (e.g., a pen input).
You should always use this method for interpreting text, as the
key symbols often have complex mappings to their unicode
representation which this event takes care of.
:Parameters:
`text` : unicode
The text entered by the user.
:event:
'''
def on_text_motion(motion):
'''The user moved the text input cursor.
Typically this is called after `on_key_press` and before
`on_key_release`, but may also be called multiple times if the key
            is held down (key repeating).
You should always use this method for moving the text input cursor
(caret), as different platforms have different default keyboard
mappings, and key repeats are handled correctly.
The values that `motion` can take are defined in
`pyglet.window.key`:
* MOTION_UP
* MOTION_RIGHT
* MOTION_DOWN
* MOTION_LEFT
* MOTION_NEXT_WORD
* MOTION_PREVIOUS_WORD
* MOTION_BEGINNING_OF_LINE
* MOTION_END_OF_LINE
* MOTION_NEXT_PAGE
* MOTION_PREVIOUS_PAGE
* MOTION_BEGINNING_OF_FILE
* MOTION_END_OF_FILE
* MOTION_BACKSPACE
* MOTION_DELETE
:Parameters:
`motion` : int
The direction of motion; see remarks.
:event:
'''
def on_text_motion_select(motion):
'''The user moved the text input cursor while extending the
selection.
Typically this is called after `on_key_press` and before
`on_key_release`, but may also be called multiple times if the key
            is held down (key repeating).
You should always use this method for responding to text selection
events rather than the raw `on_key_press`, as different platforms
have different default keyboard mappings, and key repeats are
handled correctly.
The values that `motion` can take are defined in `pyglet.window.key`:
* MOTION_UP
* MOTION_RIGHT
* MOTION_DOWN
* MOTION_LEFT
* MOTION_NEXT_WORD
* MOTION_PREVIOUS_WORD
* MOTION_BEGINNING_OF_LINE
* MOTION_END_OF_LINE
* MOTION_NEXT_PAGE
* MOTION_PREVIOUS_PAGE
* MOTION_BEGINNING_OF_FILE
* MOTION_END_OF_FILE
:Parameters:
`motion` : int
The direction of selection motion; see remarks.
:event:
'''
def on_mouse_motion(x, y, dx, dy):
'''The mouse was moved with no buttons held down.
:Parameters:
`x` : float
Distance in pixels from the left edge of the window.
`y` : float
Distance in pixels from the bottom edge of the window.
`dx` : float
Relative X position from the previous mouse position.
`dy` : float
Relative Y position from the previous mouse position.
:event:
'''
def on_mouse_drag(x, y, dx, dy, buttons, modifiers):
'''The mouse was moved with one or more mouse buttons pressed.
This event will continue to be fired even if the mouse leaves
the window, so long as the drag buttons are continuously held down.
:Parameters:
`x` : float
Distance in pixels from the left edge of the window.
`y` : float
Distance in pixels from the bottom edge of the window.
`dx` : float
Relative X position from the previous mouse position.
`dy` : float
Relative Y position from the previous mouse position.
`buttons` : int
Bitwise combination of the mouse buttons currently pressed.
`modifiers` : int
Bitwise combination of any keyboard modifiers currently
active.
:event:
'''
def on_mouse_press(x, y, button, modifiers):
'''A mouse button was pressed (and held down).
:Parameters:
`x` : float
Distance in pixels from the left edge of the window.
`y` : float
Distance in pixels from the bottom edge of the window.
`button` : int
The mouse button that was pressed.
`modifiers` : int
Bitwise combination of any keyboard modifiers currently
active.
:event:
'''
def on_mouse_release(x, y, button, modifiers):
'''A mouse button was released.
:Parameters:
`x` : float
Distance in pixels from the left edge of the window.
`y` : float
Distance in pixels from the bottom edge of the window.
`button` : int
The mouse button that was released.
`modifiers` : int
Bitwise combination of any keyboard modifiers currently
active.
:event:
'''
def on_mouse_scroll(x, y, scroll_x, scroll_y):
'''The mouse wheel was scrolled.
Note that most mice have only a vertical scroll wheel, so
`scroll_x` is usually 0. An exception to this is the Apple Mighty
Mouse, which has a mouse ball in place of the wheel which allows
both `scroll_x` and `scroll_y` movement.
:Parameters:
`x` : float
Distance in pixels from the left edge of the window.
`y` : float
Distance in pixels from the bottom edge of the window.
`scroll_x` : int
Number of "clicks" towards the right (left if negative).
`scroll_y` : int
                    Number of "clicks" upwards (downwards if negative).
:event:
'''
def on_close():
'''The user attempted to close the window.
This event can be triggered by clicking on the "X" control box in
the window title bar, or by some other platform-dependent manner.
:event:
'''
def on_mouse_enter(x, y):
'''The mouse was moved into the window.
            This event will not be triggered if the mouse is currently being
dragged.
:Parameters:
`x` : float
Distance in pixels from the left edge of the window.
`y` : float
Distance in pixels from the bottom edge of the window.
:event:
'''
def on_mouse_leave(x, y):
'''The mouse was moved outside of the window.
            This event will not be triggered if the mouse is currently being
dragged. Note that the coordinates of the mouse pointer will be
outside of the window rectangle.
:Parameters:
`x` : float
Distance in pixels from the left edge of the window.
`y` : float
Distance in pixels from the bottom edge of the window.
:event:
'''
def on_expose():
'''A portion of the window needs to be redrawn.
This event is triggered when the window first appears, and any time
            the contents of the window are invalidated due to another window
obscuring it.
There is no way to determine which portion of the window needs
redrawing. Note that the use of this method is becoming
increasingly uncommon, as newer window managers composite windows
automatically and keep a backing store of the window contents.
:event:
'''
def on_resize(width, height):
'''The window was resized.
:Parameters:
`width` : int
The new width of the window, in pixels.
`height` : int
The new height of the window, in pixels.
:event:
'''
def on_move(x, y):
'''The window was moved.
:Parameters:
`x` : int
Distance from the left edge of the screen to the left edge
of the window.
`y` : int
Distance from the top edge of the screen to the top edge of
the window. Note that this is one of few methods in pyglet
which use a Y-down coordinate system.
:event:
'''
def on_activate():
'''The window was activated.
This event can be triggered by clicking on the title bar, bringing
it to the foreground; or by some platform-specific method.
When a window is "active" it has the keyboard focus.
:event:
'''
def on_deactivate():
'''The window was deactivated.
This event can be triggered by clicking on another application
window. When a window is deactivated it no longer has the
keyboard focus.
:event:
'''
def on_show():
'''The window was shown.
This event is triggered when a window is restored after being
minimised, or after being displayed for the first time.
:event:
'''
def on_hide():
'''The window was hidden.
This event is triggered when a window is minimised or (on Mac OS X)
hidden by the user.
:event:
'''
def on_context_lost():
'''The window's GL context was lost.
When the context is lost no more GL methods can be called until it
is recreated. This is a rare event, triggered perhaps by the user
switching to an incompatible video mode. When it occurs, an
application will need to reload all objects (display lists, texture
objects, shaders) as well as restore the GL state.
:event:
'''
def on_context_state_lost():
'''The state of the window's GL context was lost.
pyglet may sometimes need to recreate the window's GL context if
the window is moved to another video device, or between fullscreen
or windowed mode. In this case it will try to share the objects
(display lists, texture objects, shaders) between the old and new
contexts. If this is possible, only the current state of the GL
context is lost, and the application should simply restore state.
:event:
'''
BaseWindow.register_event_type('on_key_press')
BaseWindow.register_event_type('on_key_release')
BaseWindow.register_event_type('on_text')
BaseWindow.register_event_type('on_text_motion')
BaseWindow.register_event_type('on_text_motion_select')
BaseWindow.register_event_type('on_mouse_motion')
BaseWindow.register_event_type('on_mouse_drag')
BaseWindow.register_event_type('on_mouse_press')
BaseWindow.register_event_type('on_mouse_release')
BaseWindow.register_event_type('on_mouse_scroll')
BaseWindow.register_event_type('on_mouse_enter')
BaseWindow.register_event_type('on_mouse_leave')
BaseWindow.register_event_type('on_close')
BaseWindow.register_event_type('on_expose')
BaseWindow.register_event_type('on_resize')
BaseWindow.register_event_type('on_move')
BaseWindow.register_event_type('on_activate')
BaseWindow.register_event_type('on_deactivate')
BaseWindow.register_event_type('on_show')
BaseWindow.register_event_type('on_hide')
BaseWindow.register_event_type('on_context_lost')
BaseWindow.register_event_type('on_context_state_lost')
def get_platform():
'''Get an instance of the Platform most appropriate for this
system.
:rtype: `Platform`
:return: The platform instance.
'''
return _platform
if hasattr(sys, 'is_epydoc') and sys.is_epydoc:
# We are building documentation
Window = BaseWindow
Window.__name__ = 'Window'
del BaseWindow
else:
# Try to determine which platform to use.
if sys.platform == 'darwin':
from pyglet.window.carbon import CarbonPlatform, CarbonWindow
_platform = CarbonPlatform()
Window = CarbonWindow
elif sys.platform in ('win32', 'cygwin'):
from pyglet.window.win32 import Win32Platform, Win32Window
_platform = Win32Platform()
Window = Win32Window
else:
from pyglet.window.xlib import XlibPlatform, XlibWindow
_platform = XlibPlatform()
Window = XlibWindow
| certik/sympy-oldcore | sympy/plotting/pyglet/window/__init__.py | Python | bsd-3-clause | 54,133 |
from .working_gif import working_encoded
from .splash import SplashScreen, Spinner, CheckProcessor
from .multilistbox import MultiListbox
from .utils import set_widget_state, set_binding, set_button_action, set_tab_order
from .tooltip import ToolTip
| rutherford/tikitiki | tikitiki/__init__.py | Python | bsd-3-clause | 255 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The freesurfer module provides basic functions for interfacing with
freesurfer tools.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
__docformat__ = 'restructuredtext'
import os
from nipype.utils.filemanip import fname_presuffix, split_filename
from nipype.interfaces.freesurfer.base import FSCommand, FSTraitedSpec
from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath,
OutputMultiPath, Directory, isdefined)
class MRISPreprocInputSpec(FSTraitedSpec):
out_file = File(argstr='--out %s', genfile=True,
desc='output filename')
target = traits.Str(argstr='--target %s', mandatory=True,
desc='target subject name')
hemi = traits.Enum('lh', 'rh', argstr='--hemi %s',
mandatory=True,
desc='hemisphere for source and target')
surf_measure = traits.Str(argstr='--meas %s',
xor=('surf_measure', 'surf_measure_file', 'surf_area'),
desc='Use subject/surf/hemi.surf_measure as input')
surf_area = traits.Str(argstr='--area %s',
xor=('surf_measure', 'surf_measure_file', 'surf_area'),
desc='Extract vertex area from subject/surf/hemi.surfname to use as input.')
subjects = traits.List(argstr='--s %s...',
xor=('subjects', 'fsgd_file', 'subject_file'),
desc='subjects from who measures are calculated')
fsgd_file = File(exists=True, argstr='--fsgd %s',
xor=('subjects', 'fsgd_file', 'subject_file'),
desc='specify subjects using fsgd file')
subject_file = File(exists=True, argstr='--f %s',
xor=('subjects', 'fsgd_file', 'subject_file'),
desc='file specifying subjects separated by white space')
surf_measure_file = InputMultiPath(File(exists=True), argstr='--is %s...',
xor=('surf_measure', 'surf_measure_file', 'surf_area'),
desc='file alternative to surfmeas, still requires list of subjects')
source_format = traits.Str(argstr='--srcfmt %s', desc='source format')
surf_dir = traits.Str(argstr='--surfdir %s',
desc='alternative directory (instead of surf)')
vol_measure_file = InputMultiPath(traits.Tuple(File(exists=True),
File(exists=True)),
argstr='--iv %s %s...',
desc='list of volume measure and reg file tuples')
proj_frac = traits.Float(argstr='--projfrac %s',
desc='projection fraction for vol2surf')
fwhm = traits.Float(argstr='--fwhm %f',
xor=['num_iters'],
desc='smooth by fwhm mm on the target surface')
num_iters = traits.Int(argstr='--niters %d',
xor=['fwhm'],
desc='niters : smooth by niters on the target surface')
fwhm_source = traits.Float(argstr='--fwhm-src %f',
xor=['num_iters_source'],
desc='smooth by fwhm mm on the source surface')
num_iters_source = traits.Int(argstr='--niterssrc %d',
xor=['fwhm_source'],
desc='niters : smooth by niters on the source surface')
smooth_cortex_only = traits.Bool(argstr='--smooth-cortex-only',
desc='only smooth cortex (ie, exclude medial wall)')
class MRISPreprocOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='preprocessed output file')
class MRISPreproc(FSCommand):
"""Use FreeSurfer mris_preproc to prepare a group of contrasts for
a second level analysis
Examples
--------
>>> preproc = MRISPreproc()
>>> preproc.inputs.target = 'fsaverage'
>>> preproc.inputs.hemi = 'lh'
>>> preproc.inputs.vol_measure_file = [('cont1.nii', 'register.dat'), \
('cont1a.nii', 'register.dat')]
>>> preproc.inputs.out_file = 'concatenated_file.mgz'
>>> preproc.cmdline
'mris_preproc --hemi lh --out concatenated_file.mgz --target fsaverage --iv cont1.nii register.dat --iv cont1a.nii register.dat'
"""
_cmd = 'mris_preproc'
input_spec = MRISPreprocInputSpec
output_spec = MRISPreprocOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outfile = self.inputs.out_file
outputs['out_file'] = outfile
if not isdefined(outfile):
outputs['out_file'] = os.path.join(os.getcwd(),
'concat_%s_%s.mgz' % (self.inputs.hemi,
self.inputs.target))
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()[name]
return None
class GLMFitInputSpec(FSTraitedSpec):
glm_dir = traits.Str(argstr='--glmdir %s', desc='save outputs to dir',
genfile=True)
in_file = File(desc='input 4D file', argstr='--y %s', mandatory=True,
copyfile=False)
_design_xor = ('fsgd', 'design', 'one_sample')
fsgd = traits.Tuple(File(exists=True), traits.Enum('doss', 'dods'),
argstr='--fsgd %s %s', xor=_design_xor,
desc='freesurfer descriptor file')
design = File(exists=True, argstr='--X %s', xor=_design_xor,
desc='design matrix file')
contrast = InputMultiPath(File(exists=True), argstr='--C %s...',
desc='contrast file')
one_sample = traits.Bool(argstr='--osgm',
xor=('one_sample', 'fsgd', 'design', 'contrast'),
desc='construct X and C as a one-sample group mean')
no_contrast_sok = traits.Bool(argstr='--no-contrasts-ok',
desc='do not fail if no contrasts specified')
per_voxel_reg = InputMultiPath(File(exists=True), argstr='--pvr %s...',
desc='per-voxel regressors')
self_reg = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--selfreg %d %d %d',
desc='self-regressor from index col row slice')
weighted_ls = File(exists=True, argstr='--wls %s',
xor=('weight_file', 'weight_inv', 'weight_sqrt'),
desc='weighted least squares')
fixed_fx_var = File(exists=True, argstr='--yffxvar %s',
desc='for fixed effects analysis')
fixed_fx_dof = traits.Int(argstr='--ffxdof %d',
xor=['fixed_fx_dof_file'],
desc='dof for fixed effects analysis')
fixed_fx_dof_file = File(argstr='--ffxdofdat %d',
xor=['fixed_fx_dof'],
desc='text file with dof for fixed effects analysis')
weight_file = File(exists=True, xor=['weighted_ls'],
desc='weight for each input at each voxel')
weight_inv = traits.Bool(argstr='--w-inv', desc='invert weights',
xor=['weighted_ls'])
weight_sqrt = traits.Bool(argstr='--w-sqrt', desc='sqrt of weights',
xor=['weighted_ls'])
fwhm = traits.Range(low=0.0, argstr='--fwhm %f',
desc='smooth input by fwhm')
var_fwhm = traits.Range(low=0.0, argstr='--var-fwhm %f',
desc='smooth variance by fwhm')
no_mask_smooth = traits.Bool(argstr='--no-mask-smooth',
desc='do not mask when smoothing')
no_est_fwhm = traits.Bool(argstr='--no-est-fwhm',
desc='turn off FWHM output estimation')
mask_file = File(exists=True, argstr='--mask %s', desc='binary mask')
label_file = File(exists=True, argstr='--label %s',
xor=['cortex'],
desc='use label as mask, surfaces only')
cortex = traits.Bool(argstr='--cortex',
xor=['label_file'],
desc='use subjects ?h.cortex.label as label')
invert_mask = traits.Bool(argstr='--mask-inv',
desc='invert mask')
prune = traits.Bool(argstr='--prune',
desc='remove voxels that do not have a non-zero value at each frame (def)')
no_prune = traits.Bool(argstr='--no-prune',
                           xor=['prune_thresh'],
desc='do not prune')
prune_thresh = traits.Float(argstr='--prune_thr %f',
                                xor=['no_prune'],
desc='prune threshold. Default is FLT_MIN')
compute_log_y = traits.Bool(argstr='--logy',
desc='compute natural log of y prior to analysis')
save_estimate = traits.Bool(argstr='--yhat-save',
desc='save signal estimate (yhat)')
save_residual = traits.Bool(argstr='--eres-save',
desc='save residual error (eres)')
save_res_corr_mtx = traits.Bool(argstr='--eres-scm',
desc='save residual error spatial correlation matrix (eres.scm). Big!')
surf = traits.Bool(argstr="--surf %s %s %s",
requires=["subject_id", "hemi"],
desc="analysis is on a surface mesh")
subject_id = traits.Str(desc="subject id for surface geometry")
hemi = traits.Enum("lh", "rh", desc="surface hemisphere")
surf_geo = traits.Str("white", usedefault=True,
desc="surface geometry name (e.g. white, pial)")
simulation = traits.Tuple(traits.Enum('perm', 'mc-full', 'mc-z'),
traits.Int(min=1), traits.Float, traits.Str,
argstr='--sim %s %d %f %s',
desc='nulltype nsim thresh csdbasename')
sim_sign = traits.Enum('abs', 'pos', 'neg', argstr='--sim-sign %s',
desc='abs, pos, or neg')
uniform = traits.Tuple(traits.Float, traits.Float,
argstr='--uniform %f %f',
desc='use uniform distribution instead of gaussian')
pca = traits.Bool(argstr='--pca',
desc='perform pca/svd analysis on residual')
calc_AR1 = traits.Bool(argstr='--tar1',
desc='compute and save temporal AR1 of residual')
save_cond = traits.Bool(argstr='--save-cond',
desc='flag to save design matrix condition at each voxel')
vox_dump = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--voxdump %d %d %d',
desc='dump voxel GLM and exit')
seed = traits.Int(argstr='--seed %d', desc='used for synthesizing noise')
synth = traits.Bool(argstr='--synth', desc='replace input with gaussian')
resynth_test = traits.Int(argstr='--resynthtest %d', desc='test GLM by resynthsis')
profile = traits.Int(argstr='--profile %d', desc='niters : test speed')
force_perm = traits.Bool(argstr='--perm-force',
                             desc='force permutation test, even when design matrix is not orthogonal')
    diag = traits.Int(argstr='--diag %d', desc='Gdiag_no : set diagnostic level')
diag_cluster = traits.Bool(argstr='--diag-cluster',
desc='save sig volume and exit from first sim loop')
debug = traits.Bool(argstr='--debug', desc='turn on debugging')
check_opts = traits.Bool(argstr='--checkopts',
desc="don't run anything, just check options and exit")
allow_repeated_subjects = traits.Bool(argstr='--allowsubjrep',
                                          desc='allow subject names to repeat in the fsgd file (must appear before --fsgd)')
allow_ill_cond = traits.Bool(argstr='--illcond',
desc='allow ill-conditioned design matrices')
sim_done_file = File(argstr='--sim-done %s',
desc='create file when simulation finished')
class GLMFitOutputSpec(TraitedSpec):
glm_dir = Directory(exists=True, desc="output directory")
beta_file = File(exists=True, desc="map of regression coefficients")
error_file = File(desc="map of residual error")
error_var_file = File(desc="map of residual error variance")
error_stddev_file = File(desc="map of residual error standard deviation")
estimate_file = File(desc="map of the estimated Y values")
mask_file = File(desc="map of the mask used in the analysis")
fwhm_file = File(desc="text file with estimated smoothness")
dof_file = File(desc="text file with effective degrees-of-freedom for the analysis")
gamma_file = OutputMultiPath(desc="map of contrast of regression coefficients")
gamma_var_file = OutputMultiPath(desc="map of regression contrast variance")
sig_file = OutputMultiPath(desc="map of F-test significance (in -log10p)")
ftest_file = OutputMultiPath(desc="map of test statistic values")
spatial_eigenvectors = File(desc="map of spatial eigenvectors from residual PCA")
frame_eigenvectors = File(desc="matrix of frame eigenvectors from residual PCA")
singular_values = File(desc="matrix singular values from residual PCA")
svd_stats_file = File(desc="text file summarizing the residual PCA")
class GLMFit(FSCommand):
"""Use FreeSurfer's mri_glmfit to specify and estimate a general linear model.
Examples
--------
>>> glmfit = GLMFit()
>>> glmfit.inputs.in_file = 'functional.nii'
>>> glmfit.inputs.one_sample = True
>>> glmfit.cmdline == 'mri_glmfit --glmdir %s --y functional.nii --osgm'%os.getcwd()
True
"""
_cmd = 'mri_glmfit'
input_spec = GLMFitInputSpec
output_spec = GLMFitOutputSpec
def _format_arg(self, name, spec, value):
if name == "surf":
_si = self.inputs
return spec.argstr % (_si.subject_id, _si.hemi, _si.surf_geo)
return super(GLMFit, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
# Get the top-level output directory
if not isdefined(self.inputs.glm_dir):
glmdir = os.getcwd()
else:
glmdir = os.path.abspath(self.inputs.glm_dir)
outputs["glm_dir"] = glmdir
# Assign the output files that always get created
outputs["beta_file"] = os.path.join(glmdir, "beta.mgh")
outputs["error_var_file"] = os.path.join(glmdir, "rvar.mgh")
outputs["error_stddev_file"] = os.path.join(glmdir, "rstd.mgh")
outputs["mask_file"] = os.path.join(glmdir, "mask.mgh")
outputs["fwhm_file"] = os.path.join(glmdir, "fwhm.dat")
outputs["dof_file"] = os.path.join(glmdir, "dof.dat")
# Assign the conditional outputs
if isdefined(self.inputs.save_residual) and self.inputs.save_residual:
outputs["error_file"] = os.path.join(glmdir, "eres.mgh")
if isdefined(self.inputs.save_estimate) and self.inputs.save_estimate:
outputs["estimate_file"] = os.path.join(glmdir, "yhat.mgh")
# Get the contrast directory name(s)
if isdefined(self.inputs.contrast):
contrasts = []
for c in self.inputs.contrast:
if split_filename(c)[2] in [".mat", ".dat", ".mtx", ".con"]:
contrasts.append(split_filename(c)[1])
else:
contrasts.append(os.path.split(c)[1])
elif isdefined(self.inputs.one_sample) and self.inputs.one_sample:
contrasts = ["osgm"]
# Add in the contrast images
outputs["sig_file"] = [os.path.join(glmdir, c, "sig.mgh") for c in contrasts]
outputs["ftest_file"] = [os.path.join(glmdir, c, "F.mgh") for c in contrasts]
outputs["gamma_file"] = [os.path.join(glmdir, c, "gamma.mgh") for c in contrasts]
outputs["gamma_var_file"] = [os.path.join(glmdir, c, "gammavar.mgh") for c in contrasts]
# Add in the PCA results, if relevant
if isdefined(self.inputs.pca) and self.inputs.pca:
pcadir = os.path.join(glmdir, "pca-eres")
outputs["spatial_eigenvectors"] = os.path.join(pcadir, "v.mgh")
outputs["frame_eigenvectors"] = os.path.join(pcadir, "u.mtx")
outputs["singluar_values"] = os.path.join(pcadir, "sdiag.mat")
outputs["svd_stats_file"] = os.path.join(pcadir, "stats.dat")
return outputs
def _gen_filename(self, name):
if name == 'glm_dir':
return os.getcwd()
return None
class OneSampleTTest(GLMFit):
def __init__(self, **kwargs):
super(OneSampleTTest, self).__init__(**kwargs)
self.inputs.one_sample = True
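# Illustrative usage of OneSampleTTest (a sketch; 'functional.nii' is a
# hypothetical input).  It behaves exactly like GLMFit with one_sample
# pre-set, so the generated command reduces to
# ``mri_glmfit --glmdir <cwd> --y functional.nii --osgm``.
#
#     ttest = OneSampleTTest(in_file='functional.nii')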
class BinarizeInputSpec(FSTraitedSpec):
in_file = File(exists=True, argstr='--i %s', mandatory=True,
copyfile=False, desc='input volume')
min = traits.Float(argstr='--min %f', xor=['wm_ven_csf'],
desc='min thresh')
max = traits.Float(argstr='--max %f', xor=['wm_ven_csf'],
desc='max thresh')
rmin = traits.Float(argstr='--rmin %f',
desc='compute min based on rmin*globalmean')
rmax = traits.Float(argstr='--rmax %f',
desc='compute max based on rmax*globalmean')
match = traits.List(traits.Int, argstr='--match %d...',
desc='match instead of threshold')
wm = traits.Bool(argstr='--wm',
desc='set match vals to 2 and 41 (aseg for cerebral WM)')
ventricles = traits.Bool(argstr='--ventricles',
                             desc='set match vals to those for aseg ventricles+choroid (not 4th)')
wm_ven_csf = traits.Bool(argstr='--wm+vcsf', xor=['min', 'max'],
desc='WM and ventricular CSF, including choroid (not 4th)')
binary_file = File(argstr='--o %s', genfile=True,
desc='binary output volume')
out_type = traits.Enum('nii', 'nii.gz', 'mgz', argstr='',
desc='output file type')
count_file = traits.Either(traits.Bool, File,
argstr='--count %s',
desc='save number of hits in ascii file (hits, ntotvox, pct)')
bin_val = traits.Int(argstr='--binval %d',
desc='set vox within thresh to val (default is 1)')
bin_val_not = traits.Int(argstr='--binvalnot %d',
desc='set vox outside range to val (default is 0)')
invert = traits.Bool(argstr='--inv',
desc='set binval=0, binvalnot=1')
frame_no = traits.Int(argstr='--frame %s',
desc='use 0-based frame of input (default is 0)')
merge_file = File(exists=True, argstr='--merge %s',
desc='merge with mergevol')
    mask_file = File(exists=True, argstr='--mask %s',
desc='must be within mask')
mask_thresh = traits.Float(argstr='--mask-thresh %f',
desc='set thresh for mask')
abs = traits.Bool(argstr='--abs',
desc='take abs of invol first (ie, make unsigned)')
bin_col_num = traits.Bool(argstr='--bincol',
desc='set binarized voxel value to its column number')
zero_edges = traits.Bool(argstr='--zero-edges',
desc='zero the edge voxels')
zero_slice_edge = traits.Bool(argstr='--zero-slice-edges',
desc='zero the edge slice voxels')
dilate = traits.Int(argstr='--dilate %d',
desc='niters: dilate binarization in 3D')
erode = traits.Int(argstr='--erode %d',
desc='nerode: erode binarization in 3D (after any dilation)')
erode2d = traits.Int(argstr='--erode2d %d',
desc='nerode2d: erode binarization in 2D (after any 3D erosion)')
class BinarizeOutputSpec(TraitedSpec):
binary_file = File(exists=True, desc='binarized output volume')
count_file = File(desc='ascii file containing number of hits')
class Binarize(FSCommand):
"""Use FreeSurfer mri_binarize to threshold an input volume
Examples
--------
>>> binvol = Binarize(in_file='structural.nii', min=10, binary_file='foo_out.nii')
>>> binvol.cmdline
'mri_binarize --o foo_out.nii --i structural.nii --min 10.000000'
"""
_cmd = 'mri_binarize'
input_spec = BinarizeInputSpec
output_spec = BinarizeOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outfile = self.inputs.binary_file
if not isdefined(outfile):
if isdefined(self.inputs.out_type):
outfile = fname_presuffix(self.inputs.in_file,
newpath=os.getcwd(),
suffix='.'.join(('_thresh',
self.inputs.out_type)),
use_ext=False)
else:
outfile = fname_presuffix(self.inputs.in_file,
newpath=os.getcwd(),
suffix='_thresh')
outputs['binary_file'] = os.path.abspath(outfile)
value = self.inputs.count_file
if isdefined(value):
if isinstance(value, bool):
if value:
outputs['count_file'] = fname_presuffix(self.inputs.in_file,
suffix='_count.txt',
newpath=os.getcwd(),
use_ext=False)
else:
outputs['count_file'] = value
return outputs
def _format_arg(self, name, spec, value):
if name == 'count_file':
if isinstance(value, bool):
fname = self._list_outputs()[name]
else:
fname = value
return spec.argstr % fname
if name == 'out_type':
return ''
return super(Binarize, self)._format_arg(name, spec, value)
def _gen_filename(self, name):
if name == 'binary_file':
return self._list_outputs()[name]
return None
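# Illustrative usage (a sketch): passing ``count_file=True`` asks mri_binarize
# to also write an ascii hit-count file; the filename is then generated
# automatically by _list_outputs above.
#
#     binvol = Binarize(in_file='structural.nii', min=10, count_file=True)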
class ConcatenateInputSpec(FSTraitedSpec):
in_files = InputMultiPath(File(exists=True),
desc='Individual volumes to be concatenated',
argstr='--i %s...', mandatory=True)
concatenated_file = File(desc='Output volume', argstr='--o %s',
genfile=True)
sign = traits.Enum('abs', 'pos', 'neg', argstr='--%s',
                       desc='Take only pos or neg voxels from input, or take abs')
stats = traits.Enum('sum', 'var', 'std', 'max', 'min', 'mean', argstr='--%s',
desc='Compute the sum, var, std, max, min or mean of the input volumes')
paired_stats = traits.Enum('sum', 'avg', 'diff', 'diff-norm', 'diff-norm1',
'diff-norm2', argstr='--paired-%s',
desc='Compute paired sum, avg, or diff')
gmean = traits.Int(argstr='--gmean %d',
desc='create matrix to average Ng groups, Nper=Ntot/Ng')
mean_div_n = traits.Bool(argstr='--mean-div-n',
desc='compute mean/nframes (good for var)')
multiply_by = traits.Float(argstr='--mul %f',
desc='Multiply input volume by some amount')
add_val = traits.Float(argstr='--add %f',
desc='Add some amount to the input volume')
multiply_matrix_file = File(exists=True, argstr='--mtx %s',
desc='Multiply input by an ascii matrix in file')
combine = traits.Bool(argstr='--combine',
desc='Combine non-zero values into single frame volume')
keep_dtype = traits.Bool(argstr='--keep-datatype',
                             desc='Keep voxelwise precision type (default is float)')
max_bonfcor = traits.Bool(argstr='--max-bonfcor',
desc='Compute max and bonferroni correct (assumes -log10(ps))')
max_index = traits.Bool(argstr='--max-index',
desc='Compute the index of max voxel in concatenated volumes')
mask_file = File(exists=True, argstr='--mask %s', desc='Mask input with a volume')
vote = traits.Bool(argstr='--vote',
                       desc='Most frequent value at each voxel and fraction of occurrences')
sort = traits.Bool(argstr='--sort',
desc='Sort each voxel by ascending frame value')
class ConcatenateOutputSpec(TraitedSpec):
concatenated_file = File(exists=True,
desc='Path/name of the output volume')
class Concatenate(FSCommand):
"""Use Freesurfer mri_concat to combine several input volumes
into one output volume. Can concatenate by frames, or compute
a variety of statistics on the input volumes.
Examples
--------
Combine two input volumes into one volume with two frames
>>> concat = Concatenate()
>>> concat.inputs.in_files = ['cont1.nii', 'cont2.nii']
>>> concat.inputs.concatenated_file = 'bar.nii'
>>> concat.cmdline
'mri_concat --o bar.nii --i cont1.nii --i cont2.nii'
"""
_cmd = 'mri_concat'
input_spec = ConcatenateInputSpec
output_spec = ConcatenateOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
if not isdefined(self.inputs.concatenated_file):
outputs['concatenated_file'] = os.path.join(os.getcwd(),
'concat_output.nii.gz')
else:
outputs['concatenated_file'] = self.inputs.concatenated_file
return outputs
def _gen_filename(self, name):
if name == 'concatenated_file':
return self._list_outputs()[name]
return None
class SegStatsInputSpec(FSTraitedSpec):
_xor_inputs = ('segmentation_file', 'annot', 'surf_label')
segmentation_file = File(exists=True, argstr='--seg %s', xor=_xor_inputs,
mandatory=True, desc='segmentation volume path')
annot = traits.Tuple(traits.Str, traits.Enum('lh', 'rh'), traits.Str,
argstr='--annot %s %s %s', xor=_xor_inputs,
mandatory=True,
desc='subject hemi parc : use surface parcellation')
surf_label = traits.Tuple(traits.Str, traits.Enum('lh', 'rh'), traits.Str,
argstr='--slabel %s %s %s', xor=_xor_inputs,
mandatory=True,
desc='subject hemi label : use surface label')
summary_file = File(argstr='--sum %s', genfile=True,
desc='Segmentation stats summary table file')
partial_volume_file = File(exists=True, argstr='--pv %f',
desc='Compensate for partial voluming')
in_file = File(exists=True, argstr='--i %s',
desc='Use the segmentation to report stats on this volume')
frame = traits.Int(argstr='--frame %d',
desc='Report stats on nth frame of input volume')
multiply = traits.Float(argstr='--mul %f', desc='multiply input by val')
calc_snr = traits.Bool(argstr='--snr', desc='save mean/std as extra column in output table')
calc_power = traits.Enum('sqr', 'sqrt', argstr='--%s',
desc='Compute either the sqr or the sqrt of the input')
_ctab_inputs = ('color_table_file', 'default_color_table', 'gca_color_table')
color_table_file = File(exists=True, argstr='--ctab %s', xor=_ctab_inputs,
desc='color table file with seg id names')
default_color_table = traits.Bool(argstr='--ctab-default', xor=_ctab_inputs,
desc='use $FREESURFER_HOME/FreeSurferColorLUT.txt')
gca_color_table = File(exists=True, argstr='--ctab-gca %s', xor=_ctab_inputs,
desc='get color table from GCA (CMA)')
segment_id = traits.List(argstr='--id %s...', desc='Manually specify segmentation ids')
exclude_id = traits.Int(argstr='--excludeid %d', desc='Exclude seg id from report')
exclude_ctx_gm_wm = traits.Bool(argstr='--excl-ctxgmwm',
desc='exclude cortical gray and white matter')
wm_vol_from_surf = traits.Bool(argstr='--surf-wm-vol', desc='Compute wm volume from surf')
cortex_vol_from_surf = traits.Bool(argstr='--surf-ctx-vol', desc='Compute cortex volume from surf')
non_empty_only = traits.Bool(argstr='--nonempty', desc='Only report nonempty segmentations')
mask_file = File(exists=True, argstr='--mask %s',
                     desc='Mask volume (same size as seg)')
mask_thresh = traits.Float(argstr='--maskthresh %f',
desc='binarize mask with this threshold <0.5>')
    mask_sign = traits.Enum('abs', 'pos', 'neg', argstr='--masksign %s',
desc='Sign for mask threshold: pos, neg, or abs')
    mask_frame = traits.Int(argstr='--maskframe %d',
requires=['mask_file'],
desc='Mask with this (0 based) frame of the mask volume')
mask_invert = traits.Bool(argstr='--maskinvert', desc='Invert binarized mask volume')
mask_erode = traits.Int(argstr='--maskerode %d', desc='Erode mask by some amount')
    brain_vol = traits.Enum('brain-vol-from-seg', 'brainmask', argstr='--%s',
desc='Compute brain volume either with ``brainmask`` or ``brain-vol-from-seg``')
etiv = traits.Bool(argstr='--etiv', desc='Compute ICV from talairach transform')
    etiv_only = traits.Enum('etiv', 'old-etiv', argstr='--%s-only',
desc='Compute etiv and exit. Use ``etiv`` or ``old-etiv``')
avgwf_txt_file = traits.Either(traits.Bool, File, argstr='--avgwf %s',
desc='Save average waveform into file (bool or filename)')
avgwf_file = traits.Either(traits.Bool, File, argstr='--avgwfvol %s',
desc='Save as binary volume (bool or filename)')
sf_avg_file = traits.Either(traits.Bool, File, argstr='--sfavg %s',
desc='Save mean across space and time')
vox = traits.List(traits.Int, argstr='--vox %s',
desc='Replace seg with all 0s except at C R S (three int inputs)')
class SegStatsOutputSpec(TraitedSpec):
summary_file = File(exists=True, desc='Segmentation summary statistics table')
avgwf_txt_file = File(desc='Text file with functional statistics averaged over segs')
avgwf_file = File(desc='Volume with functional statistics averaged over segs')
    sf_avg_file = File(desc='Text file with func statistics averaged over segs and frames')
class SegStats(FSCommand):
"""Use FreeSurfer mri_segstats for ROI analysis
Examples
--------
>>> import nipype.interfaces.freesurfer as fs
>>> ss = fs.SegStats()
>>> ss.inputs.annot = ('PWS04', 'lh', 'aparc')
>>> ss.inputs.in_file = 'functional.nii'
>>> ss.inputs.subjects_dir = '.'
>>> ss.inputs.avgwf_txt_file = './avgwf.txt'
>>> ss.inputs.summary_file = './summary.stats'
>>> ss.cmdline
'mri_segstats --annot PWS04 lh aparc --avgwf ./avgwf.txt --i functional.nii --sum ./summary.stats'
"""
_cmd = 'mri_segstats'
input_spec = SegStatsInputSpec
output_spec = SegStatsOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
        if isdefined(self.inputs.summary_file):
            outputs['summary_file'] = os.path.abspath(self.inputs.summary_file)
        else:
            outputs['summary_file'] = os.path.join(os.getcwd(), 'summary.stats')
suffices = dict(avgwf_txt_file='_avgwf.txt', avgwf_file='_avgwf.nii.gz',
sf_avg_file='sfavg.txt')
if isdefined(self.inputs.segmentation_file):
_, src = os.path.split(self.inputs.segmentation_file)
if isdefined(self.inputs.annot):
src = '_'.join(self.inputs.annot)
if isdefined(self.inputs.surf_label):
src = '_'.join(self.inputs.surf_label)
for name, suffix in suffices.items():
value = getattr(self.inputs, name)
if isdefined(value):
if isinstance(value, bool):
outputs[name] = fname_presuffix(src, suffix=suffix,
newpath=os.getcwd(),
use_ext=False)
else:
outputs[name] = os.path.abspath(value)
return outputs
def _format_arg(self, name, spec, value):
if name in ['avgwf_txt_file', 'avgwf_file', 'sf_avg_file']:
if isinstance(value, bool):
fname = self._list_outputs()[name]
else:
fname = value
return spec.argstr % fname
return super(SegStats, self)._format_arg(name, spec, value)
def _gen_filename(self, name):
if name == 'summary_file':
return self._list_outputs()[name]
return None
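# Illustrative sketch, not part of the original interface definition: the waveform
# outputs accept either a bool or an explicit filename. With a bool, _list_outputs
# derives the name from the segmentation/annotation source (e.g. aseg_avgwf.txt in
# the working directory). File names below are made up for demonstration.
def _example_segstats_waveform_outputs():
    """Hypothetical usage showing bool vs. explicit path for avgwf_txt_file."""
    ss = SegStats(segmentation_file='aseg.mgz', in_file='functional.nii',
                  summary_file='summary.stats', subjects_dir='.')
    ss.inputs.avgwf_txt_file = True          # auto-named from the segmentation source
    ss_explicit = SegStats(segmentation_file='aseg.mgz', in_file='functional.nii',
                           summary_file='summary.stats', subjects_dir='.',
                           avgwf_txt_file='waveforms.txt')  # explicit output path
    return ss.cmdline, ss_explicit.cmdline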
class Label2VolInputSpec(FSTraitedSpec):
label_file = InputMultiPath(File(exists=True), argstr='--label %s...',
xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
copyfile=False,
mandatory=True,
desc='list of label files')
annot_file = File(exists=True, argstr='--annot %s',
xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
requires=('subject_id', 'hemi'),
mandatory=True,
copyfile=False,
desc='surface annotation file')
seg_file = File(exists=True, argstr='--seg %s',
xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
mandatory=True,
copyfile=False,
desc='segmentation file')
aparc_aseg = traits.Bool(argstr='--aparc+aseg',
xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
mandatory=True,
desc='use aparc+aseg.mgz in subjectdir as seg')
template_file = File(exists=True, argstr='--temp %s', mandatory=True,
desc='output template volume')
reg_file = File(exists=True, argstr='--reg %s',
xor=('reg_file', 'reg_header', 'identity'),
desc='tkregister style matrix VolXYZ = R*LabelXYZ')
reg_header = File(exists=True, argstr='--regheader %s',
xor=('reg_file', 'reg_header', 'identity'),
desc='label template volume')
identity = traits.Bool(argstr='--identity',
xor=('reg_file', 'reg_header', 'identity'),
desc='set R=I')
invert_mtx = traits.Bool(argstr='--invertmtx',
desc='Invert the registration matrix')
fill_thresh = traits.Range(0., 1., argstr='--fillthresh %.f',
desc='thresh : between 0 and 1')
label_voxel_volume = traits.Float(argstr='--labvoxvol %f',
desc='volume of each label point (def 1mm3)')
proj = traits.Tuple(traits.Enum('abs', 'frac'), traits.Float,
traits.Float, traits.Float,
argstr='--proj %s %f %f %f',
requires=('subject_id', 'hemi'),
desc='project along surface normal')
subject_id = traits.Str(argstr='--subject %s',
desc='subject id')
hemi = traits.Enum('lh', 'rh', argstr='--hemi %s',
desc='hemisphere to use lh or rh')
surface = traits.Str(argstr='--surf %s',
desc='use surface instead of white')
vol_label_file = File(argstr='--o %s', genfile=True,
desc='output volume')
label_hit_file = File(argstr='--hits %s',
desc='file with each frame is nhits for a label')
map_label_stat = File(argstr='--label-stat %s',
desc='map the label stats field into the vol')
native_vox2ras = traits.Bool(argstr='--native-vox2ras',
desc='use native vox2ras xform instead of tkregister-style')
class Label2VolOutputSpec(TraitedSpec):
vol_label_file = File(exists=True, desc='output volume')
class Label2Vol(FSCommand):
"""Make a binary volume from a Freesurfer label
Examples
--------
>>> binvol = Label2Vol(label_file='cortex.label', template_file='structural.nii', reg_file='register.dat', fill_thresh=0.5, vol_label_file='foo_out.nii')
>>> binvol.cmdline
'mri_label2vol --fillthresh 0 --label cortex.label --reg register.dat --temp structural.nii --o foo_out.nii'
"""
_cmd = 'mri_label2vol'
input_spec = Label2VolInputSpec
output_spec = Label2VolOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outfile = self.inputs.vol_label_file
if not isdefined(outfile):
for key in ['label_file', 'annot_file', 'seg_file']:
if isdefined(getattr(self.inputs,key)):
path = getattr(self.inputs, key)
if isinstance(path,list):
path = path[0]
_, src = os.path.split(path)
if isdefined(self.inputs.aparc_aseg):
src = 'aparc+aseg.mgz'
outfile = fname_presuffix(src, suffix='_vol.nii.gz',
newpath=os.getcwd(),
use_ext=False)
outputs['vol_label_file'] = outfile
return outputs
def _gen_filename(self, name):
if name == 'vol_label_file':
return self._list_outputs()[name]
return None
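# Illustrative sketch, not original code: when vol_label_file is left undefined,
# _list_outputs names the output after whichever source input was used
# (label_file, annot_file, seg_file, or aparc+aseg). File names here are made up.
def _example_label2vol_generated_name():
    """Hypothetical usage relying on the generated output name."""
    l2v = Label2Vol(label_file='cortex.label', template_file='structural.nii',
                    reg_file='register.dat')
    return l2v._list_outputs()['vol_label_file']  # <cwd>/cortex_vol.nii.gz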
class MS_LDAInputSpec(FSTraitedSpec):
lda_labels = traits.List(traits.Int(), argstr='-lda %s', mandatory=True,
minlen=2, maxlen=2, sep=' ',
desc='pair of class labels to optimize')
weight_file = traits.File(argstr='-weight %s', mandatory=True,
desc='filename for the LDA weights (input or output)')
vol_synth_file = traits.File(exists=False, argstr='-synth %s',
mandatory=True,
desc=('filename for the synthesized output '
'volume'))
label_file = traits.File(exists=True, argstr='-label %s',
desc='filename of the label volume')
mask_file = traits.File(exists=True, argstr='-mask %s',
desc='filename of the brain mask volume')
shift = traits.Int(argstr='-shift %d',
desc='shift all values equal to the given value to zero')
conform = traits.Bool(argstr='-conform',
desc=('Conform the input volumes (brain mask '
'typically already conformed)'))
use_weights = traits.Bool(argstr='-W',
desc=('Use the weights from a previously '
'generated weight file'))
images = InputMultiPath(File(exists=True), argstr='%s', mandatory=True,
copyfile=False, desc='list of input FLASH images',
position=-1)
class MS_LDAOutputSpec(TraitedSpec):
weight_file = File(exists=True, desc='')
vol_synth_file = File(exists=True, desc='')
class MS_LDA(FSCommand):
"""Perform LDA reduction on the intensity space of an arbitrary # of FLASH images
Examples
--------
>>> grey_label = 2
>>> white_label = 3
>>> zero_value = 1
>>> optimalWeights = MS_LDA(lda_labels=[grey_label, white_label], \
label_file='label.mgz', weight_file='weights.txt', \
shift=zero_value, vol_synth_file='synth_out.mgz', \
conform=True, use_weights=True, \
images=['FLASH1.mgz', 'FLASH2.mgz', 'FLASH3.mgz'])
>>> optimalWeights.cmdline
'mri_ms_LDA -conform -label label.mgz -lda 2 3 -shift 1 -W -synth synth_out.mgz -weight weights.txt FLASH1.mgz FLASH2.mgz FLASH3.mgz'
"""
_cmd = 'mri_ms_LDA'
input_spec = MS_LDAInputSpec
output_spec = MS_LDAOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
        outputs['vol_synth_file'] = os.path.abspath(self.inputs.vol_synth_file)
if not isdefined(self.inputs.use_weights) or self.inputs.use_weights is False:
outputs['weight_file'] = os.path.abspath(self.inputs.weight_file)
return outputs
def _verify_weights_file_exists(self):
if not os.path.exists(os.path.abspath(self.inputs.weight_file)):
raise traits.TraitError("MS_LDA: use_weights must accompany an existing weights file")
def _format_arg(self, name, spec, value):
        if name == 'use_weights':
if self.inputs.use_weights is True:
self._verify_weights_file_exists()
else:
return ''
# TODO: Fix bug when boolean values are set explicitly to false
return super(MS_LDA, self)._format_arg(name, spec, value)
def _gen_filename(self, name):
pass
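# Illustrative note, not original code: with use_weights=True, _format_arg calls
# _verify_weights_file_exists, so the weight file must already exist on disk when
# the command line is built, otherwise a TraitError is raised. File names are
# made up.
def _example_ms_lda_precomputed_weights():
    """Hypothetical usage with a previously generated weight file."""
    lda = MS_LDA(lda_labels=[2, 3], weight_file='weights.txt',
                 vol_synth_file='synth_out.mgz', use_weights=True,
                 images=['FLASH1.mgz', 'FLASH2.mgz'])
    return lda.cmdline  # raises TraitError if weights.txt is missing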
| fprados/nipype | nipype/interfaces/freesurfer/model.py | Python | bsd-3-clause | 41,958 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/TestScript) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
class TestScript(domainresource.DomainResource):
""" Describes a set of tests.
A structured set of tests against a FHIR server implementation to determine
compliance against the FHIR specification.
"""
resource_type = "TestScript"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.contact = None
""" Contact details for the publisher.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.copyright = None
""" Use and/or publishing restrictions.
Type `str`. """
self.date = None
""" Date this was last changed.
Type `FHIRDate` (represented as `str` in JSON). """
self.description = None
""" Natural language description of the test script.
Type `str`. """
self.destination = None
""" An abstract server representing a destination or receiver in a
message exchange.
List of `TestScriptDestination` items (represented as `dict` in JSON). """
self.experimental = None
""" For testing purposes, not real usage.
Type `bool`. """
self.fixture = None
""" Fixture in the test script - by reference (uri).
List of `TestScriptFixture` items (represented as `dict` in JSON). """
self.identifier = None
""" Additional identifier for the test script.
Type `Identifier` (represented as `dict` in JSON). """
self.jurisdiction = None
""" Intended jurisdiction for test script (if applicable).
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.metadata = None
""" Required capability that is assumed to function correctly on the
FHIR server being tested.
Type `TestScriptMetadata` (represented as `dict` in JSON). """
self.name = None
""" Name for this test script (computer friendly).
Type `str`. """
self.origin = None
""" An abstract server representing a client or sender in a message
exchange.
List of `TestScriptOrigin` items (represented as `dict` in JSON). """
self.profile = None
""" Reference of the validation profile.
List of `FHIRReference` items referencing `Resource` (represented as `dict` in JSON). """
self.publisher = None
""" Name of the publisher (organization or individual).
Type `str`. """
self.purpose = None
""" Why this test script is defined.
Type `str`. """
self.rule = None
""" Assert rule used within the test script.
List of `TestScriptRule` items (represented as `dict` in JSON). """
self.ruleset = None
""" Assert ruleset used within the test script.
List of `TestScriptRuleset` items (represented as `dict` in JSON). """
self.setup = None
""" A series of required setup operations before tests are executed.
Type `TestScriptSetup` (represented as `dict` in JSON). """
self.status = None
""" draft | active | retired | unknown.
Type `str`. """
self.teardown = None
""" A series of required clean up steps.
Type `TestScriptTeardown` (represented as `dict` in JSON). """
self.test = None
""" A test in this script.
List of `TestScriptTest` items (represented as `dict` in JSON). """
self.title = None
""" Name for this test script (human friendly).
Type `str`. """
self.url = None
""" Logical URI to reference this test script (globally unique).
Type `str`. """
self.useContext = None
""" Context the content is intended to support.
List of `UsageContext` items (represented as `dict` in JSON). """
self.variable = None
""" Placeholder for evaluated elements.
List of `TestScriptVariable` items (represented as `dict` in JSON). """
self.version = None
""" Business version of the test script.
Type `str`. """
super(TestScript, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScript, self).elementProperties()
js.extend([
("contact", "contact", contactdetail.ContactDetail, True, None, False),
("copyright", "copyright", str, False, None, False),
("date", "date", fhirdate.FHIRDate, False, None, False),
("description", "description", str, False, None, False),
("destination", "destination", TestScriptDestination, True, None, False),
("experimental", "experimental", bool, False, None, False),
("fixture", "fixture", TestScriptFixture, True, None, False),
("identifier", "identifier", identifier.Identifier, False, None, False),
("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
("metadata", "metadata", TestScriptMetadata, False, None, False),
("name", "name", str, False, None, True),
("origin", "origin", TestScriptOrigin, True, None, False),
("profile", "profile", fhirreference.FHIRReference, True, None, False),
("publisher", "publisher", str, False, None, False),
("purpose", "purpose", str, False, None, False),
("rule", "rule", TestScriptRule, True, None, False),
("ruleset", "ruleset", TestScriptRuleset, True, None, False),
("setup", "setup", TestScriptSetup, False, None, False),
("status", "status", str, False, None, True),
("teardown", "teardown", TestScriptTeardown, False, None, False),
("test", "test", TestScriptTest, True, None, False),
("title", "title", str, False, None, False),
("url", "url", str, False, None, True),
("useContext", "useContext", usagecontext.UsageContext, True, None, False),
("variable", "variable", TestScriptVariable, True, None, False),
("version", "version", str, False, None, False),
])
return js
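# Illustrative sketch, an assumption rather than part of the generated model: each
# tuple returned by elementProperties() is (python name, JSON name, type, is_list,
# "one of many" group, required). A resource can therefore be round-tripped from a
# plain dict; the values below are invented, and as_json() is assumed to come from
# the shared FHIR base classes.
def _example_build_testscript():
    """Hypothetical construction of a minimal TestScript from JSON."""
    data = {
        "resourceType": "TestScript",
        "url": "http://example.org/fhir/TestScript/example",
        "name": "ExampleScript",
        "status": "draft",
    }
    script = TestScript(jsondict=data, strict=True)
    return script.as_json()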
from . import backboneelement
class TestScriptDestination(backboneelement.BackboneElement):
""" An abstract server representing a destination or receiver in a message
exchange.
An abstract server used in operations within this test script in the
destination element.
"""
resource_type = "TestScriptDestination"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.index = None
""" The index of the abstract destination server starting at 1.
Type `int`. """
self.profile = None
""" FHIR-Server | FHIR-SDC-FormManager | FHIR-SDC-FormReceiver | FHIR-
SDC-FormProcessor.
Type `Coding` (represented as `dict` in JSON). """
super(TestScriptDestination, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptDestination, self).elementProperties()
js.extend([
("index", "index", int, False, None, True),
("profile", "profile", coding.Coding, False, None, True),
])
return js
class TestScriptFixture(backboneelement.BackboneElement):
""" Fixture in the test script - by reference (uri).
Fixture in the test script - by reference (uri). All fixtures are required
for the test script to execute.
"""
resource_type = "TestScriptFixture"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.autocreate = None
""" Whether or not to implicitly create the fixture during setup.
Type `bool`. """
self.autodelete = None
""" Whether or not to implicitly delete the fixture during teardown.
Type `bool`. """
self.resource = None
""" Reference of the resource.
Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """
super(TestScriptFixture, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptFixture, self).elementProperties()
js.extend([
("autocreate", "autocreate", bool, False, None, False),
("autodelete", "autodelete", bool, False, None, False),
("resource", "resource", fhirreference.FHIRReference, False, None, False),
])
return js
class TestScriptMetadata(backboneelement.BackboneElement):
""" Required capability that is assumed to function correctly on the FHIR
server being tested.
    The required capabilities must exist and are assumed to function correctly
    on the FHIR server being tested.
"""
resource_type = "TestScriptMetadata"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.capability = None
""" Capabilities that are assumed to function correctly on the FHIR
server being tested.
List of `TestScriptMetadataCapability` items (represented as `dict` in JSON). """
self.link = None
""" Links to the FHIR specification.
List of `TestScriptMetadataLink` items (represented as `dict` in JSON). """
super(TestScriptMetadata, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptMetadata, self).elementProperties()
js.extend([
("capability", "capability", TestScriptMetadataCapability, True, None, True),
("link", "link", TestScriptMetadataLink, True, None, False),
])
return js
class TestScriptMetadataCapability(backboneelement.BackboneElement):
""" Capabilities that are assumed to function correctly on the FHIR server
being tested.
Capabilities that must exist and are assumed to function correctly on the
FHIR server being tested.
"""
resource_type = "TestScriptMetadataCapability"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.capabilities = None
""" Required Capability Statement.
Type `FHIRReference` referencing `CapabilityStatement` (represented as `dict` in JSON). """
self.description = None
""" The expected capabilities of the server.
Type `str`. """
self.destination = None
""" Which server these requirements apply to.
Type `int`. """
self.link = None
""" Links to the FHIR specification.
List of `str` items. """
self.origin = None
""" Which origin server these requirements apply to.
List of `int` items. """
self.required = None
""" Are the capabilities required?.
Type `bool`. """
self.validated = None
""" Are the capabilities validated?.
Type `bool`. """
super(TestScriptMetadataCapability, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptMetadataCapability, self).elementProperties()
js.extend([
("capabilities", "capabilities", fhirreference.FHIRReference, False, None, True),
("description", "description", str, False, None, False),
("destination", "destination", int, False, None, False),
("link", "link", str, True, None, False),
("origin", "origin", int, True, None, False),
("required", "required", bool, False, None, False),
("validated", "validated", bool, False, None, False),
])
return js
class TestScriptMetadataLink(backboneelement.BackboneElement):
""" Links to the FHIR specification.
A link to the FHIR specification that this test is covering.
"""
resource_type = "TestScriptMetadataLink"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.description = None
""" Short description.
Type `str`. """
self.url = None
""" URL to the specification.
Type `str`. """
super(TestScriptMetadataLink, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptMetadataLink, self).elementProperties()
js.extend([
("description", "description", str, False, None, False),
("url", "url", str, False, None, True),
])
return js
class TestScriptOrigin(backboneelement.BackboneElement):
""" An abstract server representing a client or sender in a message exchange.
An abstract server used in operations within this test script in the origin
element.
"""
resource_type = "TestScriptOrigin"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.index = None
""" The index of the abstract origin server starting at 1.
Type `int`. """
self.profile = None
""" FHIR-Client | FHIR-SDC-FormFiller.
Type `Coding` (represented as `dict` in JSON). """
super(TestScriptOrigin, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptOrigin, self).elementProperties()
js.extend([
("index", "index", int, False, None, True),
("profile", "profile", coding.Coding, False, None, True),
])
return js
class TestScriptRule(backboneelement.BackboneElement):
""" Assert rule used within the test script.
Assert rule to be used in one or more asserts within the test script.
"""
resource_type = "TestScriptRule"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.param = None
""" Rule parameter template.
List of `TestScriptRuleParam` items (represented as `dict` in JSON). """
self.resource = None
""" Assert rule resource reference.
Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """
super(TestScriptRule, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptRule, self).elementProperties()
js.extend([
("param", "param", TestScriptRuleParam, True, None, False),
("resource", "resource", fhirreference.FHIRReference, False, None, True),
])
return js
class TestScriptRuleParam(backboneelement.BackboneElement):
""" Rule parameter template.
Each rule template can take one or more parameters for rule evaluation.
"""
resource_type = "TestScriptRuleParam"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.name = None
""" Parameter name matching external assert rule parameter.
Type `str`. """
self.value = None
""" Parameter value defined either explicitly or dynamically.
Type `str`. """
super(TestScriptRuleParam, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptRuleParam, self).elementProperties()
js.extend([
("name", "name", str, False, None, True),
("value", "value", str, False, None, False),
])
return js
class TestScriptRuleset(backboneelement.BackboneElement):
""" Assert ruleset used within the test script.
Contains one or more rules. Offers a way to group rules so assertions
could reference the group of rules and have them all applied.
"""
resource_type = "TestScriptRuleset"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.resource = None
""" Assert ruleset resource reference.
Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """
self.rule = None
""" The referenced rule within the ruleset.
List of `TestScriptRulesetRule` items (represented as `dict` in JSON). """
super(TestScriptRuleset, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptRuleset, self).elementProperties()
js.extend([
("resource", "resource", fhirreference.FHIRReference, False, None, True),
("rule", "rule", TestScriptRulesetRule, True, None, True),
])
return js
class TestScriptRulesetRule(backboneelement.BackboneElement):
""" The referenced rule within the ruleset.
The referenced rule within the external ruleset template.
"""
resource_type = "TestScriptRulesetRule"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.param = None
""" Ruleset rule parameter template.
List of `TestScriptRulesetRuleParam` items (represented as `dict` in JSON). """
self.ruleId = None
""" Id of referenced rule within the ruleset.
Type `str`. """
super(TestScriptRulesetRule, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptRulesetRule, self).elementProperties()
js.extend([
("param", "param", TestScriptRulesetRuleParam, True, None, False),
("ruleId", "ruleId", str, False, None, True),
])
return js
class TestScriptRulesetRuleParam(backboneelement.BackboneElement):
""" Ruleset rule parameter template.
Each rule template can take one or more parameters for rule evaluation.
"""
resource_type = "TestScriptRulesetRuleParam"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.name = None
""" Parameter name matching external assert ruleset rule parameter.
Type `str`. """
self.value = None
""" Parameter value defined either explicitly or dynamically.
Type `str`. """
super(TestScriptRulesetRuleParam, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptRulesetRuleParam, self).elementProperties()
js.extend([
("name", "name", str, False, None, True),
("value", "value", str, False, None, False),
])
return js
class TestScriptSetup(backboneelement.BackboneElement):
""" A series of required setup operations before tests are executed.
"""
resource_type = "TestScriptSetup"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.action = None
""" A setup operation or assert to perform.
List of `TestScriptSetupAction` items (represented as `dict` in JSON). """
super(TestScriptSetup, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptSetup, self).elementProperties()
js.extend([
("action", "action", TestScriptSetupAction, True, None, True),
])
return js
class TestScriptSetupAction(backboneelement.BackboneElement):
""" A setup operation or assert to perform.
Action would contain either an operation or an assertion.
"""
resource_type = "TestScriptSetupAction"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.assert_fhir = None
""" The assertion to perform.
Type `TestScriptSetupActionAssert` (represented as `dict` in JSON). """
self.operation = None
""" The setup operation to perform.
Type `TestScriptSetupActionOperation` (represented as `dict` in JSON). """
super(TestScriptSetupAction, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptSetupAction, self).elementProperties()
js.extend([
("assert_fhir", "assert", TestScriptSetupActionAssert, False, None, False),
("operation", "operation", TestScriptSetupActionOperation, False, None, False),
])
return js
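# Illustrative note, not part of the generated model: the attribute is named
# assert_fhir because "assert" is a Python keyword, but elementProperties() maps it
# back to the JSON key "assert", so wire-format round-trips keep the FHIR name.
# The nested dict below is invented for demonstration.
def _example_setup_action_from_json():
    """Hypothetical action carrying only an assertion."""
    action = TestScriptSetupAction({
        "assert": {"description": "Expect HTTP 200", "responseCode": "200",
                   "warningOnly": False},
    })
    return action.assert_fhir.responseCode  # "200"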
class TestScriptSetupActionAssert(backboneelement.BackboneElement):
""" The assertion to perform.
Evaluates the results of previous operations to determine if the server
under test behaves appropriately.
"""
resource_type = "TestScriptSetupActionAssert"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.compareToSourceExpression = None
""" The fluentpath expression to evaluate against the source fixture.
Type `str`. """
self.compareToSourceId = None
""" Id of the source fixture to be evaluated.
Type `str`. """
self.compareToSourcePath = None
""" XPath or JSONPath expression to evaluate against the source fixture.
Type `str`. """
self.contentType = None
""" xml | json | ttl | none.
Type `str`. """
self.description = None
""" Tracking/reporting assertion description.
Type `str`. """
self.direction = None
""" response | request.
Type `str`. """
self.expression = None
""" The fluentpath expression to be evaluated.
Type `str`. """
self.headerField = None
""" HTTP header field name.
Type `str`. """
self.label = None
""" Tracking/logging assertion label.
Type `str`. """
self.minimumId = None
""" Fixture Id of minimum content resource.
Type `str`. """
self.navigationLinks = None
""" Perform validation on navigation links?.
Type `bool`. """
self.operator = None
""" equals | notEquals | in | notIn | greaterThan | lessThan | empty |
notEmpty | contains | notContains | eval.
Type `str`. """
self.path = None
""" XPath or JSONPath expression.
Type `str`. """
self.requestMethod = None
""" delete | get | options | patch | post | put.
Type `str`. """
self.requestURL = None
""" Request URL comparison value.
Type `str`. """
self.resource = None
""" Resource type.
Type `str`. """
self.response = None
""" okay | created | noContent | notModified | bad | forbidden |
notFound | methodNotAllowed | conflict | gone | preconditionFailed
| unprocessable.
Type `str`. """
self.responseCode = None
""" HTTP response code to test.
Type `str`. """
self.rule = None
""" The reference to a TestScript.rule.
Type `TestScriptSetupActionAssertRule` (represented as `dict` in JSON). """
self.ruleset = None
""" The reference to a TestScript.ruleset.
Type `TestScriptSetupActionAssertRuleset` (represented as `dict` in JSON). """
self.sourceId = None
""" Fixture Id of source expression or headerField.
Type `str`. """
self.validateProfileId = None
""" Profile Id of validation profile reference.
Type `str`. """
self.value = None
""" The value to compare to.
Type `str`. """
self.warningOnly = None
""" Will this assert produce a warning only on error?.
Type `bool`. """
super(TestScriptSetupActionAssert, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptSetupActionAssert, self).elementProperties()
js.extend([
("compareToSourceExpression", "compareToSourceExpression", str, False, None, False),
("compareToSourceId", "compareToSourceId", str, False, None, False),
("compareToSourcePath", "compareToSourcePath", str, False, None, False),
("contentType", "contentType", str, False, None, False),
("description", "description", str, False, None, False),
("direction", "direction", str, False, None, False),
("expression", "expression", str, False, None, False),
("headerField", "headerField", str, False, None, False),
("label", "label", str, False, None, False),
("minimumId", "minimumId", str, False, None, False),
("navigationLinks", "navigationLinks", bool, False, None, False),
("operator", "operator", str, False, None, False),
("path", "path", str, False, None, False),
("requestMethod", "requestMethod", str, False, None, False),
("requestURL", "requestURL", str, False, None, False),
("resource", "resource", str, False, None, False),
("response", "response", str, False, None, False),
("responseCode", "responseCode", str, False, None, False),
("rule", "rule", TestScriptSetupActionAssertRule, False, None, False),
("ruleset", "ruleset", TestScriptSetupActionAssertRuleset, False, None, False),
("sourceId", "sourceId", str, False, None, False),
("validateProfileId", "validateProfileId", str, False, None, False),
("value", "value", str, False, None, False),
("warningOnly", "warningOnly", bool, False, None, False),
])
return js
class TestScriptSetupActionAssertRule(backboneelement.BackboneElement):
""" The reference to a TestScript.rule.
The TestScript.rule this assert will evaluate.
"""
resource_type = "TestScriptSetupActionAssertRule"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.param = None
""" Rule parameter template.
List of `TestScriptSetupActionAssertRuleParam` items (represented as `dict` in JSON). """
self.ruleId = None
""" Id of the TestScript.rule.
Type `str`. """
super(TestScriptSetupActionAssertRule, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptSetupActionAssertRule, self).elementProperties()
js.extend([
("param", "param", TestScriptSetupActionAssertRuleParam, True, None, False),
("ruleId", "ruleId", str, False, None, True),
])
return js
class TestScriptSetupActionAssertRuleParam(backboneelement.BackboneElement):
""" Rule parameter template.
Each rule template can take one or more parameters for rule evaluation.
"""
resource_type = "TestScriptSetupActionAssertRuleParam"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.name = None
""" Parameter name matching external assert rule parameter.
Type `str`. """
self.value = None
""" Parameter value defined either explicitly or dynamically.
Type `str`. """
super(TestScriptSetupActionAssertRuleParam, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptSetupActionAssertRuleParam, self).elementProperties()
js.extend([
("name", "name", str, False, None, True),
("value", "value", str, False, None, True),
])
return js
class TestScriptSetupActionAssertRuleset(backboneelement.BackboneElement):
""" The reference to a TestScript.ruleset.
The TestScript.ruleset this assert will evaluate.
"""
resource_type = "TestScriptSetupActionAssertRuleset"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.rule = None
""" The referenced rule within the ruleset.
List of `TestScriptSetupActionAssertRulesetRule` items (represented as `dict` in JSON). """
self.rulesetId = None
""" Id of the TestScript.ruleset.
Type `str`. """
super(TestScriptSetupActionAssertRuleset, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptSetupActionAssertRuleset, self).elementProperties()
js.extend([
("rule", "rule", TestScriptSetupActionAssertRulesetRule, True, None, False),
("rulesetId", "rulesetId", str, False, None, True),
])
return js
class TestScriptSetupActionAssertRulesetRule(backboneelement.BackboneElement):
""" The referenced rule within the ruleset.
The referenced rule within the external ruleset template.
"""
resource_type = "TestScriptSetupActionAssertRulesetRule"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.param = None
""" Rule parameter template.
List of `TestScriptSetupActionAssertRulesetRuleParam` items (represented as `dict` in JSON). """
self.ruleId = None
""" Id of referenced rule within the ruleset.
Type `str`. """
super(TestScriptSetupActionAssertRulesetRule, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptSetupActionAssertRulesetRule, self).elementProperties()
js.extend([
("param", "param", TestScriptSetupActionAssertRulesetRuleParam, True, None, False),
("ruleId", "ruleId", str, False, None, True),
])
return js
class TestScriptSetupActionAssertRulesetRuleParam(backboneelement.BackboneElement):
""" Rule parameter template.
Each rule template can take one or more parameters for rule evaluation.
"""
resource_type = "TestScriptSetupActionAssertRulesetRuleParam"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.name = None
""" Parameter name matching external assert ruleset rule parameter.
Type `str`. """
self.value = None
""" Parameter value defined either explicitly or dynamically.
Type `str`. """
super(TestScriptSetupActionAssertRulesetRuleParam, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptSetupActionAssertRulesetRuleParam, self).elementProperties()
js.extend([
("name", "name", str, False, None, True),
("value", "value", str, False, None, True),
])
return js
class TestScriptSetupActionOperation(backboneelement.BackboneElement):
""" The setup operation to perform.
The operation to perform.
"""
resource_type = "TestScriptSetupActionOperation"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.accept = None
""" xml | json | ttl | none.
Type `str`. """
self.contentType = None
""" xml | json | ttl | none.
Type `str`. """
self.description = None
""" Tracking/reporting operation description.
Type `str`. """
self.destination = None
""" Server responding to the request.
Type `int`. """
self.encodeRequestUrl = None
""" Whether or not to send the request url in encoded format.
Type `bool`. """
self.label = None
""" Tracking/logging operation label.
Type `str`. """
self.origin = None
""" Server initiating the request.
Type `int`. """
self.params = None
""" Explicitly defined path parameters.
Type `str`. """
self.requestHeader = None
""" Each operation can have one or more header elements.
List of `TestScriptSetupActionOperationRequestHeader` items (represented as `dict` in JSON). """
self.requestId = None
""" Fixture Id of mapped request.
Type `str`. """
self.resource = None
""" Resource type.
Type `str`. """
self.responseId = None
""" Fixture Id of mapped response.
Type `str`. """
self.sourceId = None
""" Fixture Id of body for PUT and POST requests.
Type `str`. """
self.targetId = None
""" Id of fixture used for extracting the [id], [type], and [vid] for
GET requests.
Type `str`. """
self.type = None
""" The operation code type that will be executed.
Type `Coding` (represented as `dict` in JSON). """
self.url = None
""" Request URL.
Type `str`. """
super(TestScriptSetupActionOperation, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptSetupActionOperation, self).elementProperties()
js.extend([
("accept", "accept", str, False, None, False),
("contentType", "contentType", str, False, None, False),
("description", "description", str, False, None, False),
("destination", "destination", int, False, None, False),
("encodeRequestUrl", "encodeRequestUrl", bool, False, None, False),
("label", "label", str, False, None, False),
("origin", "origin", int, False, None, False),
("params", "params", str, False, None, False),
("requestHeader", "requestHeader", TestScriptSetupActionOperationRequestHeader, True, None, False),
("requestId", "requestId", str, False, None, False),
("resource", "resource", str, False, None, False),
("responseId", "responseId", str, False, None, False),
("sourceId", "sourceId", str, False, None, False),
("targetId", "targetId", str, False, None, False),
("type", "type", coding.Coding, False, None, False),
("url", "url", str, False, None, False),
])
return js
class TestScriptSetupActionOperationRequestHeader(backboneelement.BackboneElement):
""" Each operation can have one or more header elements.
Header elements would be used to set HTTP headers.
"""
resource_type = "TestScriptSetupActionOperationRequestHeader"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.field = None
""" HTTP header field name.
Type `str`. """
self.value = None
""" HTTP headerfield value.
Type `str`. """
super(TestScriptSetupActionOperationRequestHeader, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptSetupActionOperationRequestHeader, self).elementProperties()
js.extend([
("field", "field", str, False, None, True),
("value", "value", str, False, None, True),
])
return js
class TestScriptTeardown(backboneelement.BackboneElement):
""" A series of required clean up steps.
A series of operations required to clean up after the all the tests are
executed (successfully or otherwise).
"""
resource_type = "TestScriptTeardown"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.action = None
""" One or more teardown operations to perform.
List of `TestScriptTeardownAction` items (represented as `dict` in JSON). """
super(TestScriptTeardown, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptTeardown, self).elementProperties()
js.extend([
("action", "action", TestScriptTeardownAction, True, None, True),
])
return js
class TestScriptTeardownAction(backboneelement.BackboneElement):
""" One or more teardown operations to perform.
The teardown action will only contain an operation.
"""
resource_type = "TestScriptTeardownAction"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.operation = None
""" The teardown operation to perform.
Type `TestScriptSetupActionOperation` (represented as `dict` in JSON). """
super(TestScriptTeardownAction, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptTeardownAction, self).elementProperties()
js.extend([
("operation", "operation", TestScriptSetupActionOperation, False, None, True),
])
return js
class TestScriptTest(backboneelement.BackboneElement):
""" A test in this script.
"""
resource_type = "TestScriptTest"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.action = None
""" A test operation or assert to perform.
List of `TestScriptTestAction` items (represented as `dict` in JSON). """
self.description = None
""" Tracking/reporting short description of the test.
Type `str`. """
self.name = None
""" Tracking/logging name of this test.
Type `str`. """
super(TestScriptTest, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptTest, self).elementProperties()
js.extend([
("action", "action", TestScriptTestAction, True, None, True),
("description", "description", str, False, None, False),
("name", "name", str, False, None, False),
])
return js
class TestScriptTestAction(backboneelement.BackboneElement):
""" A test operation or assert to perform.
Action would contain either an operation or an assertion.
"""
resource_type = "TestScriptTestAction"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.assert_fhir = None
""" The setup assertion to perform.
Type `TestScriptSetupActionAssert` (represented as `dict` in JSON). """
self.operation = None
""" The setup operation to perform.
Type `TestScriptSetupActionOperation` (represented as `dict` in JSON). """
super(TestScriptTestAction, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptTestAction, self).elementProperties()
js.extend([
("assert_fhir", "assert", TestScriptSetupActionAssert, False, None, False),
("operation", "operation", TestScriptSetupActionOperation, False, None, False),
])
return js
class TestScriptVariable(backboneelement.BackboneElement):
""" Placeholder for evaluated elements.
Variable is set based either on element value in response body or on header
field value in the response headers.
"""
resource_type = "TestScriptVariable"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.defaultValue = None
""" Default, hard-coded, or user-defined value for this variable.
Type `str`. """
self.description = None
""" Natural language description of the variable.
Type `str`. """
self.expression = None
""" The fluentpath expression against the fixture body.
Type `str`. """
self.headerField = None
""" HTTP header field name for source.
Type `str`. """
self.hint = None
""" Hint help text for default value to enter.
Type `str`. """
self.name = None
""" Descriptive name for this variable.
Type `str`. """
self.path = None
""" XPath or JSONPath against the fixture body.
Type `str`. """
self.sourceId = None
""" Fixture Id of source expression or headerField within this variable.
Type `str`. """
super(TestScriptVariable, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptVariable, self).elementProperties()
js.extend([
("defaultValue", "defaultValue", str, False, None, False),
("description", "description", str, False, None, False),
("expression", "expression", str, False, None, False),
("headerField", "headerField", str, False, None, False),
("hint", "hint", str, False, None, False),
("name", "name", str, False, None, True),
("path", "path", str, False, None, False),
("sourceId", "sourceId", str, False, None, False),
])
return js
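# Clarifying comment (added, behaviour unchanged): the imports below live at the
# bottom of the module and fall back to the already-loaded module objects in
# sys.modules, which is presumed to sidestep circular imports between the
# generated model files.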
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import coding
except ImportError:
coding = sys.modules[__package__ + '.coding']
try:
from . import contactdetail
except ImportError:
contactdetail = sys.modules[__package__ + '.contactdetail']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import usagecontext
except ImportError:
usagecontext = sys.modules[__package__ + '.usagecontext']
| all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/testscript.py | Python | bsd-3-clause | 50,708 |
import shelve, time, random
def main(connection, info) :
"""This is the old plugin"""
#"""Run every time a message is seen"""
if info["message"].startswith("\x01ACTION") and info["message"].endswith("\x01") :
on_ACTION(connection, info)
return None
# if info["sender"] == "OperServ" :
# words = info["message"].split(" ")
# if words[0] == "REGISTER:" :
# newchannel = words[1].replace("\002", "")
# registeree = words[3].replace("\002", "")
# connection.rawsend("JOIN %s\n" % (newchannel))
# connection.rawsend("MODE %s +o %s\n" % (newchannel, conf.nick))
# connection.msg(newchannel, "Hello %s, I am sonicbot and I am here to help you with IRC." % (registeree))
seendb = shelve.open("seen.db", writeback=True)
if not seendb.has_key("users") :
seendb["users"] = {}
seendb.sync()
seendb["users"][info["sender"].lower()] = [time.time(), info["message"]]
seendb.sync()
seendb.close()
badwords = shelve.open("badwords.db", writeback=True)
if badwords.has_key(connection.host) :
if badwords[connection.host].has_key(info["channel"]) :
nosay = badwords[connection.host][info["channel"]]["badwords"]
for word in nosay :
if word in [message.replace(".", "").replace("!","").replace("?", "") for message in info["message"].lower().split(" ")] :
if info["sender"] not in badwords[connection.host][info["channel"]]["users"] :
badwords[connection.host][info["channel"]]["users"][info["sender"]] = 0
badwords.sync()
# if badwords[connection.host][info["channel"]]["users"][info["sender"]] > 0 :
# if info["sender"] in connection.hostnames.keys() :
# target = "*!*@%s" % (connection.hostnames[info["sender"]])
# else : target = "%s*!*@*" % (info["sender"])
# connection.rawsend("MODE %s +b %s\n" % (info["channel"], target))
connection.rawsend("KICK %s %s :%s (%s)\n" % (info["channel"], info["sender"], "Don't use that word!", word))
badwords[connection.host][info["channel"]]["users"][info["sender"]] += 1
badwords.sync()
badwords.close()
if info["sender"] not in connection.ignorelist :
if info["message"].lower().startswith("hi") or info["message"].lower().startswith("hello") or info["message"].lower().startswith("hey") :
if connection.nick.lower() in info["message"].lower() :
connection.msg(info["channel"], _("Hello %(sender)s!") % dict(sender=info["sender"]))
contextdb = shelve.open("context.db", writeback=True)
if not contextdb.has_key(info["channel"]) and info["channel"].startswith("#") :
contextdb[info["channel"]] = ["<%s> %s" % (info["sender"], info["message"])]
contextdb.sync()
elif contextdb.has_key(info["channel"]) :
contextdb[info["channel"]].append("<%s> %s" % (info["sender"], info["message"]))
contextdb.sync()
if len(contextdb[info["channel"]]) > 10 :
contextdb[info["channel"]].pop(0)
contextdb.sync()
contextdb.close()
memos = shelve.open("memos.db", writeback=True)
if memos.has_key(info["sender"].lower()) :
for memo in memos[info["sender"].lower()] :
connection.ircsend(info["channel"], "%(sender)s: %(memoer)s sent you a memo! '%(memo)s'" % {"sender":info["sender"], "memoer":memo["sender"], "memo":memo["message"]})
memos[info["sender"].lower()] = []
memos.sync()
memos.close()
# if info["sender"] not in conf.ignorelist and info["hostname"] not in conf.hostignores :
# combos = shelve.open("combos.db", writeback=True)
# if info["channel"] not in combos.keys() :
# combos[info["channel"]] = []
# combos.sync()
# combos[info["channel"]].append(info["message"])
# combos.sync()
# if len(combos[info["channel"]]) > 3 :
# combos[info["channel"]].pop(0)
# combos.sync()
# if len(combos[info["channel"]]) == 3 :
# temp = combos[info["channel"]]
# if temp[1].lower().startswith(temp[0].lower()) and temp[2].lower().startswith(temp[0].lower()) :
# connection.msg(info["channel"], temp[0])
# del combos[info["channel"]]
# combos.sync()
# combos.close()
if info["message"].startswith("PING") : connection.notice(info["sender"], info["message"])
mail = shelve.open("mail.db", writeback=True)
if info["sender"].replace("[", "").replace("]", "") in mail.keys() :
if info["hostname"] in mail[info["sender"].replace("[", "").replace("]", "")]["hostname"] :
if mail[info["sender"].replace("[", "").replace("]", "")]["notify"] :
connection.msg(info["sender"], _("You have new mail."))
mail[info["sender"].replace("[", "").replace("]", "")]["notify"] = False
mail.sync()
mail.close()
emotions = shelve.open("emotions.db", writeback=True)
info["sender"] = info["sender"].lower()
if info["sender"].lower() not in emotions.keys() and happiness_detect(info) :
emotions[info["sender"].lower()] = {}
emotions.sync()
emotions[info["sender"].lower()]["happy"] = 0
emotions.sync()
emotions[info["sender"].lower()]["sad"] = 0
emotions.sync()
if info["sender"].lower() in emotions.keys() :
for emotion in [":)", ":D", "C:", "=D", ";p", "=)", "C=", "(=", "(:" "xD", "=p", ":p"] :
if emotion in info["message"] :
emotions[info["sender"].lower()]["happy"] += 1
emotions.sync()
break
for emotion in [":(", "D:", "=(", "D=", "):", ")=", "=C", ":C"] :
if emotion in info["message"] :
emotions[info["sender"].lower()]["sad"] += 1
emotions.sync()
break
if ":P" in info["message"] :
emotions[info["sender"].lower()]["happy"] += .5
emotions.sync()
emotions.close()
notify = shelve.open("notify.db", writeback=True)
if info["sender"] in notify.keys() :
temp = notify[info["sender"]]
for user in temp :
connection.msg(user, _("%(nick)s has just said something in %(channel)s") % dict(nick=info["sender"], channel=info["channel"]))
notify[info["sender"]].remove(user)
notify.sync()
if notify[info["sender"]] == [] :
del notify[info["sender"]]
notify.sync()
notify.close()
def happiness_detect(info) :
"""Checks to see if a smiley is in the message"""
for emotion in [":)", ":D", "C:", "=D", "=)", "C=", "(=", "(:" "xD", ":p", ";p", "=p", ":(", "D:", "=(", "D=", "):", ")=", "=C", ":C", ":P"] :
if emotion in info["message"] : return True
return False
def on_ACTION(connection, info) :
"""Runs every time somebody does an action (/me)"""
badwords = shelve.open("badwords.db", writeback=True)
if badwords.has_key(connection.host) :
if badwords[connection.host].has_key(info["channel"]) :
nosay = badwords[connection.host][info["channel"]]["badwords"]
for word in nosay :
if word in [message.replace(".", "").replace("!","").replace("?", "") for message in info["message"].lower().split(" ")] :
if info["sender"] not in badwords[connection.host][info["channel"]]["users"] :
badwords[connection.host][info["channel"]]["users"][info["sender"]] = 0
badwords.sync()
# if badwords[connection.host][info["channel"]]["users"][info["sender"]] > 0 :
# if info["sender"] in connection.hostnames.keys() :
# target = "*!*@%s" % (connection.hostnames[info["sender"]])
# else : target = "%s*!*@*" % (info["sender"])
# connection.rawsend("MODE %s +b %s\n" % (info["channel"], target))
connection.rawsend("KICK %s %s :%s (%s)\n" % (info["channel"], info["sender"], "Don't use that word!", word))
badwords[connection.host][info["channel"]]["users"][info["sender"]] += 1
badwords.sync()
badwords.close()
memos = shelve.open("memos.db", writeback=True)
if memos.has_key(info["sender"].lower()) :
for memo in memos[info["sender"].lower()] :
connection.ircsend(info["channel"], "%(sender)s: %(memoer)s sent you a memo! '%(memo)s'" % {"sender":info["sender"], "memoer":memo["sender"], "memo":memo["message"]})
memos[info["sender"].lower()] = []
memos.sync()
memos.close()
args = info["message"].replace("\x01", "").split(" ")[1:]
contextdb = shelve.open("context.db", writeback=True)
if not contextdb.has_key(info["channel"]) and info["channel"].startswith("#") :
contextdb[info["channel"]] = ["<%s> %s" % (info["sender"], info["message"])]
contextdb.sync()
elif contextdb.has_key(info["channel"]) :
contextdb[info["channel"]].append("*%s %s" % (info["sender"], " ".join(args).replace("", "")))
contextdb.sync()
if len(contextdb[info["channel"]]) > 10 :
contextdb[info["channel"]].pop(0)
contextdb.sync()
contextdb.close()
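	# seen.db records when each user was last active and what they did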
seendb = shelve.open("seen.db", writeback=True)
if not seendb.has_key("users") :
seendb["users"] = {}
seendb.sync()
seendb["users"][info["sender"].lower()] = [time.time(), "*%s %s" % (info["sender"], " ".join(args).replace("", ""))]
seendb.close()
if len(args) > 1 :
if args[0] in ["slaps", "punches", "stomps", "hurts", "rapes", "hits", "fucks", "smacks", "crunches", "kicks", "barfs", "forces", "force", "squishes", "bodyslams", "shoots", "compresses", "tackles", "stabs"] :
if args[1] == connection.nick or args[-1] == connection.nick :
connection.msg(info["channel"], random.choice(["Oww!", "Ouch, that hurt!", "\x01ACTION curls up in fetal position\x01", "\x01ACTION slaps %s\x01" % (info["sender"]), "\x01ACTION smacks %s\x01" % (info["sender"]), "\x01ACTION kicks %s\x01" % (info["sender"]), "\x01ACTION explodes\x01"]))
if len(args) > 1 :
if args[0].lower() == "hugs" and args[1] == connection.nick :
connection.msg(info["channel"], "\x01ACTION hugs %(sender)s\x01" % dict(sender=info["sender"]))
| sonicrules1234/sonicbot | oldplugins/on_PRIVMSG.py | Python | bsd-3-clause | 10,821 |
import math
import random
import onmt
from torch.autograd import Variable
class Dataset(object):
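    """Holds parallel source/target tensors and serves padded (optionally CUDA) minibatches."""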
def __init__(self, srcData, tgtData, batchSize, cuda, volatile=False):
self.src = srcData
if tgtData:
self.tgt = tgtData
assert(len(self.src) == len(self.tgt))
else:
self.tgt = None
self.cuda = cuda
self.batchSize = batchSize
self.numBatches = math.ceil(len(self.src)/batchSize)
self.volatile = volatile
def _batchify(self, data, align_right=False):
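        # Pad every sequence to the longest in the batch, then transpose to (seq_len, batch)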
max_length = max(x.size(0) for x in data)
out = data[0].new(len(data), max_length).fill_(onmt.Constants.PAD)
for i in range(len(data)):
data_length = data[i].size(0)
offset = max_length - data_length if align_right else 0
out[i].narrow(0, offset, data_length).copy_(data[i])
out = out.t().contiguous()
if self.cuda:
out = out.cuda()
v = Variable(out, volatile=self.volatile)
return v
def __getitem__(self, index):
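        # Return the index-th minibatch as (srcBatch, tgtBatch); tgtBatch is None when no targets were given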
assert index < self.numBatches, "%d > %d" % (index, self.numBatches)
srcBatch = self._batchify(
self.src[index*self.batchSize:(index+1)*self.batchSize], align_right=True)
if self.tgt:
tgtBatch = self._batchify(
self.tgt[index*self.batchSize:(index+1)*self.batchSize])
else:
tgtBatch = None
return srcBatch, tgtBatch
def __len__(self):
return self.numBatches
def shuffle(self):
zipped = list(zip(self.src, self.tgt))
random.shuffle(zipped)
self.src, self.tgt = [x[0] for x in zipped], [x[1] for x in zipped]
| bmccann/examples | OpenNMT/onmt/Dataset.py | Python | bsd-3-clause | 1,722 |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..utils import AffineInitializer
def test_AffineInitializer_inputs():
input_map = dict(args=dict(argstr='%s',
),
dimension=dict(argstr='%s',
position=0,
usedefault=True,
),
environ=dict(nohash=True,
usedefault=True,
),
fixed_image=dict(argstr='%s',
mandatory=True,
position=1,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
local_search=dict(argstr='%d',
position=7,
usedefault=True,
),
moving_image=dict(argstr='%s',
mandatory=True,
position=2,
),
num_threads=dict(nohash=True,
usedefault=True,
),
out_file=dict(argstr='%s',
position=3,
usedefault=True,
),
principal_axes=dict(argstr='%d',
position=6,
usedefault=True,
),
radian_fraction=dict(argstr='%f',
position=5,
usedefault=True,
),
search_factor=dict(argstr='%f',
position=4,
usedefault=True,
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
)
inputs = AffineInitializer.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_AffineInitializer_outputs():
output_map = dict(out_file=dict(),
)
outputs = AffineInitializer.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| mick-d/nipype | nipype/interfaces/ants/tests/test_auto_AffineInitializer.py | Python | bsd-3-clause | 1,633 |
name = "neurogenesis"
| juliusf/Neurogenesis | neurogenesis/__init__.py | Python | bsd-3-clause | 22 |