from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Boolean, create_engine
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class Factoid(Base):
__tablename__ = 'factoids'
id = Column(Integer, primary_key=True)
keyword = Column(String, unique=True)
response = Column(String)
createdby = Column(String, default="")
alteredby = Column(String, nullable=True, default="")
locked = Column(Boolean, nullable=True, default=False)
lockedby = Column(String, nullable=True, default="")
def __init__(self, key, response, createdby="", alteredby=None, locked=False, lockedby=None):
self.keyword = key
self.response = response
self.createdby = createdby
self.alteredby = alteredby
self.locked = locked
self.lockedby = lockedby
def getResponse(self):
return self.response
def getKey(self):
return self.keyword
def __repr__(self):
return "<Factoid('%s', '%s', '%s', '%s', %s, '%s')>" % \
(self.getKey(), self.getResponse(), self.createdby, self.alteredby, self.locked, self.lockedby)
class FactoidException(Exception): pass
class FactoidAlreadyExists(FactoidException): pass
class NoSuchFactoid(FactoidException): pass
class FactoidManager(object):
    def __init__(self, db=None):
        if not db:
            # ':memory:' yields 'sqlite:///:memory:', SQLAlchemy's in-memory database
            db = ":memory:"
        self.engine = create_engine('sqlite:///%s' % db)
        Base.metadata.create_all(self.engine)
        Session = sessionmaker(bind=self.engine)
        self.session = Session()
def save(self):
self.session.commit()
def allFacts(self):
return self.session.query(Factoid)
def addFact(self, key, response, replace=False):
key = key.lower()
        # NOTE: a Query object is always truthy, so existence must be checked
        # via count(), not the query object itself.
        exists = self.session.query(Factoid).filter_by(keyword=key)
        if exists.count():
            if not replace:
                raise FactoidAlreadyExists(key)
            fact = exists.first()
            fact.response = response
        else:
            fact = Factoid(key, response)
            self.session.add(fact)
        self.session.commit()
        return fact
def updateFact(self, key, response):
return self.addFact(key, response, True)
    def getFact(self, key):
        key = key.lower()
        exists = self.session.query(Factoid).filter_by(keyword=key)
        if not exists.count():
            raise NoSuchFactoid(key)
        return exists.first()
    def remFact(self, key):
        key = key.lower()  # keys are stored lowercased by addFact()
        exists = self.session.query(Factoid).filter_by(keyword=key)
        if not exists.count():
            raise NoSuchFactoid(key)
        self.session.delete(exists.first())
        self.session.commit()
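
# Editor's sketch (not part of the original plugin): minimal usage of
# FactoidManager against the default in-memory SQLite database.
if __name__ == '__main__':
    mgr = FactoidManager()
    mgr.addFact('hello', 'Hi there!')
    assert mgr.getFact('HELLO').getResponse() == 'Hi there!'
    mgr.updateFact('hello', 'Hello again!')
    mgr.remFact('hello')

| {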
"content_hash": "22896315a1216a0a772fe42360958ae9",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 107,
"avg_line_length": 31.145833333333332,
"alnum_prop": 0.5806020066889632,
"repo_name": "EntityReborn/SocBot",
"id": "a72cbbe87046b09f3ddd0a363f2ee3a4dc12197f",
"size": "2990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/factoidbase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "201344"
}
],
"symlink_target": ""
} |
import project_procurement
import project_mrp
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| {
"content_hash": "2176a83213bf847c2cec4472e62c5b66",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 65,
"avg_line_length": 28.25,
"alnum_prop": 0.831858407079646,
"repo_name": "chjw8016/GreenOdoo7-haibao",
"id": "df4f0fa8a52af54f821964d3d80baff39993cbc5",
"size": "1092",
"binary": false,
"copies": "67",
"ref": "refs/heads/master",
"path": "openerp/addons/project_mrp/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "90846"
},
{
"name": "CSS",
"bytes": "384369"
},
{
"name": "JavaScript",
"bytes": "1730589"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "9394626"
},
{
"name": "Shell",
"bytes": "5172"
},
{
"name": "XSLT",
"bytes": "156761"
}
],
"symlink_target": ""
} |
import operator
from sys import version_info
from unittest import TestCase
from xml.etree import ElementTree
from genologics.entities import StepActions, Researcher, Artifact, \
Step, StepPlacements, StepPools, Container, Stage, ReagentKit, ReagentLot, Sample, Project
from genologics.lims import Lims
if version_info[0] == 2:
from mock import patch, Mock
else:
from unittest.mock import patch, Mock
url = 'http://testgenologics.com:4040'
########
# Entities in XML
generic_artifact_xml = """<?xml version='1.0' encoding='utf-8'?>
<art:artifact xmlns:art="http://genologics.com/ri/artifact" xmlns:file="http://genologics.com/ri/file" xmlns:udf="http://genologics.com/ri/userdefined" uri="{url}/api/v2/artifacts/a1" limsid="a1">
<name>test_sample1</name>
<type>Analyte</type>
<output-type>Analyte</output-type>
<qc-flag>PASSED</qc-flag>
<location>
<container uri="{url}/api/v2/containers/c1" limsid="c1"/>
<value>A:1</value>
</location>
<working-flag>true</working-flag>
<sample uri="{url}/api/v2/samples/s1" limsid="s1"/>
<udf:field type="Numeric" name="Ave. Conc. (ng/uL)">1</udf:field>
<udf:field type="String" name="Workflow Desired">TruSeq Nano DNA Sample Prep</udf:field>
<workflow-stages>
<workflow-stage status="QUEUED" name="Test workflow s2" uri="{url}/api/v2/configuration/workflows/1/stages/2"/>
<workflow-stage status="COMPLETE" name="Test workflow s1" uri="{url}/api/v2/configuration/workflows/1/stages/1"/>
</workflow-stages>
</art:artifact>"""
generic_step_pools_xml = """<?xml version='1.0' encoding='utf-8'?>
<stp:pools xmlns:stp="http://genologics.com/ri/step" uri="{url}/steps/s1/pools">
<step uri="{url}/api/v2/steps/122-28760" rel="steps"/>
<configuration uri="{url}/api/v2/configuration/protocols/467/steps/619">
Step name
</configuration>
<pooled-inputs>
<pool output-uri="{url}/api/v2/artifacts/o1" name="Pool #1">
<input uri="{url}/api/v2/artifacts/a1"/>
<input uri="{url}/api/v2/artifacts/a2"/>
</pool>
</pooled-inputs>
<available-inputs>
<input replicates="1" uri="{url}/api/v2/artifacts/a3"/>
<input replicates="1" uri="{url}/api/v2/artifacts/a4"/>
<input replicates="1" uri="{url}/api/v2/artifacts/a5"/>
</available-inputs>
</stp:pools>
"""
generic_step_placements_xml = """<?xml version='1.0' encoding='utf-8'?>
<stp:placements xmlns:stp="http://genologics.com/ri/step" uri="{url}/steps/s1/placements">
<step uri="{url}/steps/s1" />
<configuration uri="{url}/configuration/protocols/1/steps/1">Step name</configuration>
<selected-containers>
<container uri="{url}/containers/{container}" />
</selected-containers>
<output-placements>
<output-placement uri="{url}/artifacts/a1">
<location>
<container limsid="{container}" uri="{url}/containers/{container}" />
<value>{loc1}</value>
</location>
</output-placement>
<output-placement uri="{url}/artifacts/a2">
<location>
<container limsid="{container}" uri="{url}/containers/{container}" />
<value>{loc2}</value>
</location>
</output-placement>
</output-placements>
</stp:placements>"""
generic_reagentkit_xml = """<?xml version='1.0' encoding='utf-8'?>
<kit:reagent-kit xmlns:kit="http://genologics.com/ri/reagentkit" uri="{url}:8080/api/v2/reagentkits/r1">
<name>regaentkitname</name>
<supplier>reagentProvider</supplier>
<website>www.reagentprovider.com</website>
<archived>false</archived>
</kit:reagent-kit>"""
generic_reagentlot_xml = """<?xml version='1.0' encoding='utf-8'?>
<lot:reagent-lot xmlns:lot="http://genologics.com/ri/reagentlot" limsid="l1" uri="{url}/api/v2/reagentlots/l1">
<reagent-kit uri="{url}/api/v2/reagentkits/r1" name="kitname"/>
<name>kitname</name>
<lot-number>100</lot-number>
<created-date>2015-07-16</created-date>
<last-modified-date>2015-08-17</last-modified-date>
<expiry-date>2022-08-16</expiry-date>
<created-by uri="{url}/api/v2/researchers/1"/>
<last-modified-by uri="{url}/api/v2/researchers/1"/>
<status>ARCHIVED</status>
<usage-count>1</usage-count>
</lot:reagent-lot>"""
generic_step_actions_xml = """<stp:actions xmlns:stp="http://genologics.com/ri/step" uri="...">
<step rel="..." uri="{url}/steps/s1">
</step>
<configuration uri="{url}/config/1">...</configuration>
<next-actions>
<next-action artifact-uri="{url}/artifacts/a1" action="requeue" step-uri="..." rework-step-uri="...">
</next-action>
</next-actions>
<escalation>
<request>
<author uri="{url}/researchers/r1">
<first-name>foo</first-name>
<last-name>bar</last-name>
</author>
<reviewer uri="{url}/researchers/r1">
<first-name>foo</first-name>
<last-name>bar</last-name>
</reviewer>
<date>01-01-1970</date>
<comment>no comments</comment>
</request>
<review>
<author uri="{url}/researchers/r1">
<first-name>foo</first-name>
<last-name>bar</last-name>
</author>
<date>01-01-1970</date>
<comment>no comments</comment>
</review>
<escalated-artifacts>
<escalated-artifact uri="{url}/artifacts/r1">
</escalated-artifact>
</escalated-artifacts>
</escalation>
</stp:actions>"""
generic_step_actions_no_escalation_xml = """<stp:actions xmlns:stp="http://genologics.com/ri/step" uri="...">
<step rel="..." uri="{url}/steps/s1">
</step>
<configuration uri="{url}/config/1">...</configuration>
<next-actions>
<next-action artifact-uri="{url}/artifacts/a1" action="requeue" step-uri="{url}/steps/s1" rework-step-uri="{url}/steps/s2">
</next-action>
</next-actions>
</stp:actions>"""
generic_sample_creation_xml = """
<smp:samplecreation xmlns:smp="http://genologics.com/ri/sample" limsid="s1" uri="{url}/api/v2/samples/s1">
<location>
<container limsid="cont1" uri="{url}/api/v2/containers/cont1">
</container>
<value>1:1</value>
</location>
<name>
sample1
</name>
<project uri="{url}/api/v2/projects/p1" limsid="p1">
</project>
</smp:samplecreation>
"""
class TestEntities(TestCase):
def test_pass(self):
pass
def elements_equal(e1, e2):
if e1.tag != e2.tag:
print('Tag: %s != %s' % (e1.tag, e2.tag))
return False
if e1.text and e2.text and e1.text.strip() != e2.text.strip():
print('Text: %s != %s' % (e1.text.strip(), e2.text.strip()))
return False
if e1.tail and e2.tail and e1.tail.strip() != e2.tail.strip():
print('Tail: %s != %s' % (e1.tail.strip(), e2.tail.strip()))
return False
if e1.attrib != e2.attrib:
print('Attrib: %s != %s' % (e1.attrib, e2.attrib))
return False
    if len(e1) != len(e2):
        print('length of %s (%s) != length of %s (%s)' % (e1.tag, len(e1), e2.tag, len(e2)))
        return False
return all(
elements_equal(c1, c2) for c1, c2 in zip(sorted(e1, key=lambda x: x.tag), sorted(e2, key=lambda x: x.tag)))
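
# Editor's sketch (not from the original suite): elements_equal compares two
# trees recursively and ignores sibling order by sorting children on tag name.
_a = ElementTree.fromstring('<root><x attr="1"/><y/></root>')
_b = ElementTree.fromstring('<root><y/><x attr="1"/></root>')
assert elements_equal(_a, _b)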
class TestEntities(TestCase):
dummy_xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<dummy></dummy>"""
def setUp(self):
self.lims = Lims(url, username='test', password='password')
def _tostring(self, entity):
return self.lims.tostring(ElementTree.ElementTree(entity.root)).decode("utf-8")
class TestStepActions(TestEntities):
step_actions_xml = generic_step_actions_xml.format(url=url)
step_actions_no_escalation_xml = generic_step_actions_no_escalation_xml.format(url=url)
def test_escalation(self):
s = StepActions(uri=self.lims.get_uri('steps', 'step_id', 'actions'), lims=self.lims)
with patch('requests.Session.get', return_value=Mock(content=self.step_actions_xml, status_code=200)):
with patch('requests.post', return_value=Mock(content=self.dummy_xml, status_code=200)):
r = Researcher(uri='http://testgenologics.com:4040/researchers/r1', lims=self.lims)
a = Artifact(uri='http://testgenologics.com:4040/artifacts/r1', lims=self.lims)
expected_escalation = {
'status': 'Reviewed',
'author': r,
'artifacts': [a], 'request': 'no comments',
'answer': 'no comments',
'reviewer': r}
assert s.escalation == expected_escalation
def test_next_actions(self):
s = StepActions(uri=self.lims.get_uri('steps', 'step_id', 'actions'), lims=self.lims)
with patch('requests.Session.get',
return_value=Mock(content=self.step_actions_no_escalation_xml, status_code=200)):
step1 = Step(self.lims, uri='http://testgenologics.com:4040/steps/s1')
step2 = Step(self.lims, uri='http://testgenologics.com:4040/steps/s2')
artifact = Artifact(self.lims, uri='http://testgenologics.com:4040/artifacts/a1')
expected_next_actions = [{'artifact': artifact, 'action': 'requeue',
'step': step1, 'rework-step': step2}]
assert s.next_actions == expected_next_actions
class TestStepPools(TestEntities):
initial_step_pools = generic_step_pools_xml.format(url=url)
def test_get_pool_list(self):
s = StepPools(uri=self.lims.get_uri('steps', 's1', 'pools'), lims=self.lims)
with patch('requests.Session.get',
return_value=Mock(content=self.initial_step_pools, status_code=200)):
output = Artifact(lims=self.lims, uri='http://testgenologics.com:4040/api/v2/artifacts/o1')
i1 = Artifact(lims=self.lims, uri='http://testgenologics.com:4040/api/v2/artifacts/a1')
i2 = Artifact(lims=self.lims, uri='http://testgenologics.com:4040/api/v2/artifacts/a2')
i3 = Artifact(lims=self.lims, uri='http://testgenologics.com:4040/api/v2/artifacts/a3')
i4 = Artifact(lims=self.lims, uri='http://testgenologics.com:4040/api/v2/artifacts/a4')
i5 = Artifact(lims=self.lims, uri='http://testgenologics.com:4040/api/v2/artifacts/a5')
assert s.pools[0]['output'] == output
assert s.pools[0]['name'] == "Pool #1"
assert len(s.pools[0]['inputs']) == 2
assert s.pools[0]['inputs'][0] == i1
assert s.pools[0]['inputs'][1] == i2
assert i3 in s.available_inputs
assert i4 in s.available_inputs
assert i5 in s.available_inputs
class TestStepPlacements(TestEntities):
original_step_placements_xml = generic_step_placements_xml.format(url=url, container="c1", loc1='1:1', loc2='2:1')
modloc_step_placements_xml = generic_step_placements_xml.format(url=url, container="c1", loc1='3:1', loc2='4:1')
modcont_step_placements_xml = generic_step_placements_xml.format(url=url, container="c2", loc1='1:1', loc2='1:1')
def test_get_placements_list(self):
s = StepPlacements(uri=self.lims.get_uri('steps', 's1', 'placements'), lims=self.lims)
with patch('requests.Session.get',
return_value=Mock(content=self.original_step_placements_xml, status_code=200)):
a1 = Artifact(uri='http://testgenologics.com:4040/artifacts/a1', lims=self.lims)
a2 = Artifact(uri='http://testgenologics.com:4040/artifacts/a2', lims=self.lims)
c1 = Container(uri='http://testgenologics.com:4040/containers/c1', lims=self.lims)
expected_placements = [[a1, (c1, '1:1')], [a2, (c1, '2:1')]]
assert s.get_placement_list() == expected_placements
def test_set_placements_list(self):
a1 = Artifact(uri='http://testgenologics.com:4040/artifacts/a1', lims=self.lims)
a2 = Artifact(uri='http://testgenologics.com:4040/artifacts/a2', lims=self.lims)
c1 = Container(uri='http://testgenologics.com:4040/containers/c1', lims=self.lims)
c2 = Container(uri='http://testgenologics.com:4040/containers/c2', lims=self.lims)
s = StepPlacements(uri=self.lims.get_uri('steps', 's1', 'placements'), lims=self.lims)
with patch('requests.Session.get',
return_value=Mock(content=self.original_step_placements_xml, status_code=200)):
new_placements = [[a1, (c1, '3:1')], [a2, (c1, '4:1')]]
s.set_placement_list(new_placements)
assert elements_equal(s.root, ElementTree.fromstring(self.modloc_step_placements_xml))
def test_set_placements_list_fail(self):
a1 = Artifact(uri='http://testgenologics.com:4040/artifacts/a1', lims=self.lims)
a2 = Artifact(uri='http://testgenologics.com:4040/artifacts/a2', lims=self.lims)
c2 = Container(uri='http://testgenologics.com:4040/containers/c2', lims=self.lims)
s = StepPlacements(uri=self.lims.get_uri('steps', 's1', 'placements'), lims=self.lims)
with patch('requests.Session.get',
return_value=Mock(content=self.original_step_placements_xml, status_code=200)):
new_placements = [[a1, (c2, '1:1')], [a2, (c2, '1:1')]]
s.set_placement_list(new_placements)
assert elements_equal(s.root, ElementTree.fromstring(self.modcont_step_placements_xml))
class TestArtifacts(TestEntities):
root_artifact_xml = generic_artifact_xml.format(url=url)
def test_input_artifact_list(self):
a = Artifact(uri=self.lims.get_uri('artifacts', 'a1'), lims=self.lims)
with patch('requests.Session.get', return_value=Mock(content=self.root_artifact_xml, status_code=200)):
assert a.input_artifact_list() == []
def test_workflow_stages_and_statuses(self):
a = Artifact(uri=self.lims.get_uri('artifacts', 'a1'), lims=self.lims)
expected_wf_stage = [
(Stage(self.lims, uri=url + '/api/v2/configuration/workflows/1/stages/2'), 'QUEUED', 'Test workflow s2'),
(Stage(self.lims, uri=url + '/api/v2/configuration/workflows/1/stages/1'), 'COMPLETE', 'Test workflow s1')
]
with patch('requests.Session.get', return_value=Mock(content=self.root_artifact_xml, status_code=200)):
assert a.workflow_stages_and_statuses == expected_wf_stage
class TestReagentKits(TestEntities):
url = 'http://testgenologics.com:4040'
reagentkit_xml = generic_reagentkit_xml.format(url=url)
def test_parse_entity(self):
r = ReagentKit(uri=self.lims.get_uri('reagentkits', 'r1'), lims=self.lims)
with patch('requests.Session.get', return_value=Mock(content=self.reagentkit_xml, status_code=200)):
assert r.name == 'regaentkitname'
assert r.supplier == 'reagentProvider'
assert r.website == 'www.reagentprovider.com'
assert r.archived == False
def test_create_entity(self):
with patch('genologics.lims.requests.post', return_value=Mock(content=self.reagentkit_xml, status_code=201)):
r = ReagentKit.create(self.lims, name='regaentkitname', supplier='reagentProvider',
website='www.reagentprovider.com', archived=False)
self.assertRaises(TypeError, ReagentKit.create, self.lims, error='test')
class TestReagentLots(TestEntities):
reagentlot_xml = generic_reagentlot_xml.format(url=url)
reagentkit_xml = generic_reagentkit_xml.format(url=url)
def test_parse_entity(self):
l = ReagentLot(uri=self.lims.get_uri('reagentkits', 'r1'), lims=self.lims)
with patch('requests.Session.get', return_value=Mock(content=self.reagentlot_xml, status_code=200)):
assert l.uri
assert l.name == 'kitname'
assert l.lot_number == '100'
assert l.status == 'ARCHIVED'
def test_create_entity(self):
with patch('requests.Session.get', return_value=Mock(content=self.reagentkit_xml, status_code=200)):
r = ReagentKit(uri=self.lims.get_uri('reagentkits', 'r1'), lims=self.lims)
with patch('genologics.lims.requests.post',
return_value=Mock(content=self.reagentlot_xml, status_code=201)) as patch_post:
l = ReagentLot.create(
self.lims,
reagent_kit=r,
name='kitname',
lot_number='100',
expiry_date='2020-05-01',
status='ACTIVE'
)
assert l.uri
assert l.name == 'kitname'
assert l.lot_number == '100'
class TestSample(TestEntities):
sample_creation = generic_sample_creation_xml.format(url=url)
def test_create_entity(self):
with patch('genologics.lims.requests.post',
return_value=Mock(content=self.sample_creation, status_code=201)) as patch_post:
l = Sample.create(
self.lims,
project=Project(self.lims, uri='project'),
container=Container(self.lims, uri='container'),
position='1:1',
name='s1',
)
data = '''<?xml version=\'1.0\' encoding=\'utf-8\'?>
<smp:samplecreation xmlns:smp="http://genologics.com/ri/sample">
<name>s1</name>
<project uri="project" limsid="project" />
<location>
<container uri="container" />
<value>1:1</value>
</location>
</smp:samplecreation>'''
assert elements_equal(ElementTree.fromstring(patch_post.call_args_list[0][1]['data']),
ElementTree.fromstring(data))
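
# Editor's note (pattern summary, hypothetical names): every test above mocks
# the HTTP layer rather than hitting a live LIMS; a new entity test follows
# the same shape:
#
#     with patch('requests.Session.get',
#                return_value=Mock(content=some_entity_xml, status_code=200)):
#         entity = Artifact(uri=self.lims.get_uri('artifacts', 'a1'), lims=self.lims)
#         assert entity.name == 'expected name'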
| {
"content_hash": "d67e2938772caf4fc7825d5f149b4ae3",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 198,
"avg_line_length": 44.91968911917098,
"alnum_prop": 0.6330814925889613,
"repo_name": "SciLifeLab/genologics",
"id": "103abb40b0a02d11e391b4526af622df7f5ac1fe",
"size": "17339",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_entities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "168019"
}
],
"symlink_target": ""
} |
from ....testing import assert_equal
from ..minc import Extract
def test_Extract_inputs():
input_map = dict(args=dict(argstr='%s',
),
count=dict(argstr='-count %s',
sep=',',
),
environ=dict(nohash=True,
usedefault=True,
),
flip_any_direction=dict(argstr='-any_direction',
xor=(u'flip_positive_direction', u'flip_negative_direction', u'flip_any_direction'),
),
flip_negative_direction=dict(argstr='-negative_direction',
xor=(u'flip_positive_direction', u'flip_negative_direction', u'flip_any_direction'),
),
flip_positive_direction=dict(argstr='-positive_direction',
xor=(u'flip_positive_direction', u'flip_negative_direction', u'flip_any_direction'),
),
flip_x_any=dict(argstr='-xanydirection',
xor=(u'flip_x_positive', u'flip_x_negative', u'flip_x_any'),
),
flip_x_negative=dict(argstr='-xdirection',
xor=(u'flip_x_positive', u'flip_x_negative', u'flip_x_any'),
),
flip_x_positive=dict(argstr='+xdirection',
xor=(u'flip_x_positive', u'flip_x_negative', u'flip_x_any'),
),
flip_y_any=dict(argstr='-yanydirection',
xor=(u'flip_y_positive', u'flip_y_negative', u'flip_y_any'),
),
flip_y_negative=dict(argstr='-ydirection',
xor=(u'flip_y_positive', u'flip_y_negative', u'flip_y_any'),
),
flip_y_positive=dict(argstr='+ydirection',
xor=(u'flip_y_positive', u'flip_y_negative', u'flip_y_any'),
),
flip_z_any=dict(argstr='-zanydirection',
xor=(u'flip_z_positive', u'flip_z_negative', u'flip_z_any'),
),
flip_z_negative=dict(argstr='-zdirection',
xor=(u'flip_z_positive', u'flip_z_negative', u'flip_z_any'),
),
flip_z_positive=dict(argstr='+zdirection',
xor=(u'flip_z_positive', u'flip_z_negative', u'flip_z_any'),
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
image_maximum=dict(argstr='-image_maximum %s',
),
image_minimum=dict(argstr='-image_minimum %s',
),
image_range=dict(argstr='-image_range %s %s',
),
input_file=dict(argstr='%s',
mandatory=True,
position=-2,
),
nonormalize=dict(argstr='-nonormalize',
xor=(u'normalize', u'nonormalize'),
),
normalize=dict(argstr='-normalize',
xor=(u'normalize', u'nonormalize'),
),
out_file=dict(argstr='> %s',
genfile=True,
position=-1,
),
output_file=dict(hash_files=False,
keep_extension=False,
name_source=[u'input_file'],
name_template='%s.raw',
position=-1,
),
start=dict(argstr='-start %s',
sep=',',
),
terminal_output=dict(nohash=True,
),
write_ascii=dict(argstr='-ascii',
xor=(u'write_ascii', u'write_ascii', u'write_byte', u'write_short', u'write_int', u'write_long', u'write_float', u'write_double', u'write_signed', u'write_unsigned'),
),
write_byte=dict(argstr='-byte',
xor=(u'write_ascii', u'write_ascii', u'write_byte', u'write_short', u'write_int', u'write_long', u'write_float', u'write_double', u'write_signed', u'write_unsigned'),
),
write_double=dict(argstr='-double',
xor=(u'write_ascii', u'write_ascii', u'write_byte', u'write_short', u'write_int', u'write_long', u'write_float', u'write_double', u'write_signed', u'write_unsigned'),
),
write_float=dict(argstr='-float',
xor=(u'write_ascii', u'write_ascii', u'write_byte', u'write_short', u'write_int', u'write_long', u'write_float', u'write_double', u'write_signed', u'write_unsigned'),
),
write_int=dict(argstr='-int',
xor=(u'write_ascii', u'write_ascii', u'write_byte', u'write_short', u'write_int', u'write_long', u'write_float', u'write_double', u'write_signed', u'write_unsigned'),
),
write_long=dict(argstr='-long',
xor=(u'write_ascii', u'write_ascii', u'write_byte', u'write_short', u'write_int', u'write_long', u'write_float', u'write_double', u'write_signed', u'write_unsigned'),
),
write_range=dict(argstr='-range %s %s',
),
write_short=dict(argstr='-short',
xor=(u'write_ascii', u'write_ascii', u'write_byte', u'write_short', u'write_int', u'write_long', u'write_float', u'write_double', u'write_signed', u'write_unsigned'),
),
write_signed=dict(argstr='-signed',
xor=(u'write_signed', u'write_unsigned'),
),
write_unsigned=dict(argstr='-unsigned',
xor=(u'write_signed', u'write_unsigned'),
),
)
inputs = Extract.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Extract_outputs():
output_map = dict(output_file=dict(),
)
outputs = Extract.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| {
"content_hash": "2835c494d455d9753e3cf66343c49dfc",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 170,
"avg_line_length": 38.109375,
"alnum_prop": 0.6252562525625256,
"repo_name": "carolFrohlich/nipype",
"id": "4b634a76757ddcc70103dd8e34033ec12e5ca559",
"size": "4932",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nipype/interfaces/minc/tests/test_auto_Extract.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2320"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "5451077"
},
{
"name": "Shell",
"bytes": "3302"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
"""Internal class for endpoint discovery retry policy implementation in the Azure Cosmos database service.
"""
import logging
from azure.cosmos.documents import _OperationType
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
log_formatter = logging.Formatter('%(levelname)s:%(message)s')
log_handler = logging.StreamHandler()
log_handler.setFormatter(log_formatter)
logger.addHandler(log_handler)
class _EndpointDiscoveryRetryPolicy(object):
"""The endpoint discovery retry policy class used for geo-replicated database accounts
to handle the write forbidden exceptions due to writable/readable location changes
(say, after a failover).
"""
Max_retry_attempt_count = 120
Retry_after_in_milliseconds = 1000
def __init__(self, connection_policy, global_endpoint_manager, *args):
self.global_endpoint_manager = global_endpoint_manager
self._max_retry_attempt_count = _EndpointDiscoveryRetryPolicy.Max_retry_attempt_count
self.failover_retry_count = 0
self.retry_after_in_milliseconds = _EndpointDiscoveryRetryPolicy.Retry_after_in_milliseconds
self.connection_policy = connection_policy
self.request = args[0] if args else None
        # Clear any previous location-based routing directive
        if self.request:
            self.request.clear_route_to_location()
            # Resolve the endpoint for the request and pin the resolution to the
            # resolved endpoint. This enables marking the endpoint unavailable
            # on endpoint failover/unreachability.
            self.location_endpoint = self.global_endpoint_manager.resolve_service_endpoint(self.request)
            self.request.route_to_location(self.location_endpoint)
def ShouldRetry(self, exception):
"""Returns true if should retry based on the passed-in exception.
:param (errors.HTTPFailure instance) exception:
:rtype:
boolean
"""
if not self.connection_policy.EnableEndpointDiscovery:
return False
if self.failover_retry_count >= self.Max_retry_attempt_count:
return False
self.failover_retry_count += 1
if self.location_endpoint:
if _OperationType.IsReadOnlyOperation(self.request.operation_type):
#Mark current read endpoint as unavailable
self.global_endpoint_manager.mark_endpoint_unavailable_for_read(self.location_endpoint)
else:
self.global_endpoint_manager.mark_endpoint_unavailable_for_write(self.location_endpoint)
# set the refresh_needed flag to ensure that endpoint list is
# refreshed with new writable and readable locations
self.global_endpoint_manager.refresh_needed = True
# clear previous location-based routing directive
self.request.clear_route_to_location()
# set location-based routing directive based on retry count
# simulating single master writes by ensuring usePreferredLocations
# is set to false
self.request.route_to_location_with_preferred_location_flag(self.failover_retry_count, False)
# Resolve the endpoint for the request and pin the resolution to the resolved endpoint
# This enables marking the endpoint unavailability on endpoint failover/unreachability
self.location_endpoint = self.global_endpoint_manager.resolve_service_endpoint(self.request)
self.request.route_to_location(self.location_endpoint)
return True
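
# Editor's sketch (hypothetical driver; the real one lives in the SDK's retry
# utility): callers probe ShouldRetry on failure and sleep between attempts.
#
#     policy = _EndpointDiscoveryRetryPolicy(connection_policy,
#                                            global_endpoint_manager, request)
#     while True:
#         try:
#             return execute(request)
#         except errors.HTTPFailure as e:
#             if not policy.ShouldRetry(e):
#                 raise
#             time.sleep(policy.retry_after_in_milliseconds / 1000.0)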
| {
"content_hash": "f720be9adb6b2c069d79dbf4ed6a58c5",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 106,
"avg_line_length": 44.3125,
"alnum_prop": 0.7063469675599435,
"repo_name": "Azure/azure-documentdb-python",
"id": "dbc1e41f845f76353e98e39af2454e714a65d363",
"size": "4650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure/cosmos/endpoint_discovery_retry_policy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "663705"
}
],
"symlink_target": ""
} |
import abc
import collections
import imp
import os
from oslo_config import cfg
from oslo_log import log as logging
import routes
import six
import webob.dec
import webob.exc
from neutron.common import exceptions
from neutron.common import repos
import neutron.extensions
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron import wsgi
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class PluginInterface(object):
@classmethod
def __subclasshook__(cls, klass):
"""Checking plugin class.
The __subclasshook__ method is a class method
that will be called every time a class is tested
using issubclass(klass, PluginInterface).
In that case, it will check that every method
marked with the abstractmethod decorator is
provided by the plugin class.
"""
if not cls.__abstractmethods__:
return NotImplemented
for method in cls.__abstractmethods__:
if any(method in base.__dict__ for base in klass.__mro__):
continue
return NotImplemented
return True
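
# Editor's sketch (not part of the module): a concrete interface marks its
# required methods abstract; issubclass() then checks plugin classes
# structurally through the __subclasshook__ above.
#
#     class ExamplePluginInterface(PluginInterface):
#         @abc.abstractmethod
#         def get_widgets(self):
#             pass
#
#     # True for any class whose MRO supplies get_widgets:
#     issubclass(SomePluginClass, ExamplePluginInterface)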
@six.add_metaclass(abc.ABCMeta)
class ExtensionDescriptor(object):
"""Base class that defines the contract for extensions."""
def get_name(self):
"""The name of the extension.
e.g. 'Fox In Socks'
"""
raise NotImplementedError()
def get_alias(self):
"""The alias for the extension.
e.g. 'FOXNSOX'
"""
raise NotImplementedError()
def get_description(self):
"""Friendly description for the extension.
e.g. 'The Fox In Socks Extension'
"""
raise NotImplementedError()
def get_updated(self):
"""The timestamp when the extension was last updated.
e.g. '2011-01-22T13:25:27-06:00'
"""
# NOTE(justinsb): Not sure of the purpose of this is, vs the XML NS
raise NotImplementedError()
def get_resources(self):
"""List of extensions.ResourceExtension extension objects.
Resources define new nouns, and are accessible through URLs.
"""
resources = []
return resources
def get_actions(self):
"""List of extensions.ActionExtension extension objects.
Actions are verbs callable from the API.
"""
actions = []
return actions
def get_request_extensions(self):
"""List of extensions.RequestException extension objects.
Request extensions are used to handle custom request data.
"""
request_exts = []
return request_exts
def get_extended_resources(self, version):
"""Retrieve extended resources or attributes for core resources.
Extended attributes are implemented by a core plugin similarly
to the attributes defined in the core, and can appear in
request and response messages. Their names are scoped with the
extension's prefix. The core API version is passed to this
function, which must return a
map[<resource_name>][<attribute_name>][<attribute_property>]
specifying the extended resource attribute properties required
by that API version.
Extension can add resources and their attr definitions too.
The returned map can be integrated into RESOURCE_ATTRIBUTE_MAP.
"""
return {}
def get_plugin_interface(self):
"""Returns an abstract class which defines contract for the plugin.
The abstract class should inherit from extensions.PluginInterface,
Methods in this abstract class should be decorated as abstractmethod
"""
return None
def update_attributes_map(self, extended_attributes,
extension_attrs_map=None):
"""Update attributes map for this extension.
This is default method for extending an extension's attributes map.
An extension can use this method and supplying its own resource
attribute map in extension_attrs_map argument to extend all its
attributes that needs to be extended.
If an extension does not implement update_attributes_map, the method
does nothing and just return.
"""
if not extension_attrs_map:
return
for resource, attrs in six.iteritems(extension_attrs_map):
extended_attrs = extended_attributes.get(resource)
if extended_attrs:
attrs.update(extended_attrs)
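
# Editor's sketch (hypothetical extension, not shipped with Neutron): the
# smallest useful descriptor implements the four identity methods and, to
# extend resources, returns the attribute map described above. Per
# _load_all_extensions below, a module named exampleattrs.py must expose a
# class named Exampleattrs.
#
#     class Exampleattrs(ExtensionDescriptor):
#         def get_name(self):
#             return "Example attributes"
#
#         def get_alias(self):
#             return "example-attrs"
#
#         def get_description(self):
#             return "Adds an 'example' attribute to networks"
#
#         def get_updated(self):
#             return "2015-01-01T00:00:00-00:00"
#
#         def get_extended_resources(self, version):
#             if version != "2.0":
#                 return {}
#             return {'networks': {'example': {'allow_post': True,
#                                              'allow_put': True,
#                                              'is_visible': True}}}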
class ActionExtensionController(wsgi.Controller):
def __init__(self, application):
self.application = application
self.action_handlers = {}
def add_action(self, action_name, handler):
self.action_handlers[action_name] = handler
def action(self, request, id):
input_dict = self._deserialize(request.body,
request.get_content_type())
for action_name, handler in six.iteritems(self.action_handlers):
if action_name in input_dict:
return handler(input_dict, request, id)
# no action handler found (bump to downstream application)
response = self.application
return response
class RequestExtensionController(wsgi.Controller):
def __init__(self, application):
self.application = application
self.handlers = []
def add_handler(self, handler):
self.handlers.append(handler)
def process(self, request, *args, **kwargs):
res = request.get_response(self.application)
# currently request handlers are un-ordered
for handler in self.handlers:
response = handler(request, res)
return response
class ExtensionController(wsgi.Controller):
def __init__(self, extension_manager):
self.extension_manager = extension_manager
def _translate(self, ext):
ext_data = {}
ext_data['name'] = ext.get_name()
ext_data['alias'] = ext.get_alias()
ext_data['description'] = ext.get_description()
ext_data['updated'] = ext.get_updated()
ext_data['links'] = [] # TODO(dprince): implement extension links
return ext_data
def index(self, request):
extensions = []
for _alias, ext in six.iteritems(self.extension_manager.extensions):
extensions.append(self._translate(ext))
return dict(extensions=extensions)
def show(self, request, id):
# NOTE(dprince): the extensions alias is used as the 'id' for show
ext = self.extension_manager.extensions.get(id, None)
if not ext:
raise webob.exc.HTTPNotFound(
_("Extension with alias %s does not exist") % id)
return dict(extension=self._translate(ext))
def delete(self, request, id):
msg = _('Resource not found.')
raise webob.exc.HTTPNotFound(msg)
def create(self, request):
msg = _('Resource not found.')
raise webob.exc.HTTPNotFound(msg)
class ExtensionMiddleware(wsgi.Middleware):
"""Extensions middleware for WSGI."""
def __init__(self, application,
ext_mgr=None):
self.ext_mgr = (ext_mgr
or ExtensionManager(get_extensions_path()))
mapper = routes.Mapper()
# extended resources
for resource in self.ext_mgr.get_resources():
path_prefix = resource.path_prefix
if resource.parent:
path_prefix = (resource.path_prefix +
"/%s/{%s_id}" %
(resource.parent["collection_name"],
resource.parent["member_name"]))
LOG.debug('Extended resource: %s',
resource.collection)
for action, method in six.iteritems(resource.collection_actions):
conditions = dict(method=[method])
path = "/%s/%s" % (resource.collection, action)
with mapper.submapper(controller=resource.controller,
action=action,
path_prefix=path_prefix,
conditions=conditions) as submap:
submap.connect(path)
submap.connect("%s.:(format)" % path)
mapper.resource(resource.collection, resource.collection,
controller=resource.controller,
member=resource.member_actions,
parent_resource=resource.parent,
path_prefix=path_prefix)
# extended actions
action_controllers = self._action_ext_controllers(application,
self.ext_mgr, mapper)
for action in self.ext_mgr.get_actions():
LOG.debug('Extended action: %s', action.action_name)
controller = action_controllers[action.collection]
controller.add_action(action.action_name, action.handler)
# extended requests
req_controllers = self._request_ext_controllers(application,
self.ext_mgr, mapper)
for request_ext in self.ext_mgr.get_request_extensions():
LOG.debug('Extended request: %s', request_ext.key)
controller = req_controllers[request_ext.key]
controller.add_handler(request_ext.handler)
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
mapper)
super(ExtensionMiddleware, self).__init__(application)
@classmethod
def factory(cls, global_config, **local_config):
"""Paste factory."""
def _factory(app):
return cls(app, global_config, **local_config)
return _factory
def _action_ext_controllers(self, application, ext_mgr, mapper):
"""Return a dict of ActionExtensionController-s by collection."""
action_controllers = {}
for action in ext_mgr.get_actions():
if action.collection not in action_controllers.keys():
controller = ActionExtensionController(application)
mapper.connect("/%s/:(id)/action.:(format)" %
action.collection,
action='action',
controller=controller,
conditions=dict(method=['POST']))
mapper.connect("/%s/:(id)/action" % action.collection,
action='action',
controller=controller,
conditions=dict(method=['POST']))
action_controllers[action.collection] = controller
return action_controllers
def _request_ext_controllers(self, application, ext_mgr, mapper):
"""Returns a dict of RequestExtensionController-s by collection."""
request_ext_controllers = {}
for req_ext in ext_mgr.get_request_extensions():
if req_ext.key not in request_ext_controllers.keys():
controller = RequestExtensionController(application)
mapper.connect(req_ext.url_route + '.:(format)',
action='process',
controller=controller,
conditions=req_ext.conditions)
mapper.connect(req_ext.url_route,
action='process',
controller=controller,
conditions=req_ext.conditions)
request_ext_controllers[req_ext.key] = controller
return request_ext_controllers
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Route the incoming request with router."""
req.environ['extended.app'] = self.application
return self._router
@staticmethod
@webob.dec.wsgify(RequestClass=wsgi.Request)
def _dispatch(req):
"""Dispatch the request.
Returns the routed WSGI app's response or defers to the extended
application.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
return req.environ['extended.app']
app = match['controller']
return app
def plugin_aware_extension_middleware_factory(global_config, **local_config):
"""Paste factory."""
def _factory(app):
ext_mgr = PluginAwareExtensionManager.get_instance()
return ExtensionMiddleware(app, ext_mgr=ext_mgr)
return _factory
class ExtensionManager(object):
"""Load extensions from the configured extension path.
See tests/unit/extensions/foxinsocks.py for an
example extension implementation.
"""
def __init__(self, path):
LOG.info(_LI('Initializing extension manager.'))
self.path = path
self.extensions = {}
self._load_all_extensions()
def get_resources(self):
"""Returns a list of ResourceExtension objects."""
resources = []
resources.append(ResourceExtension('extensions',
ExtensionController(self)))
for ext in self.extensions.values():
try:
resources.extend(ext.get_resources())
            except AttributeError:
                # NOTE(dprince): Extensions aren't required to have resource
                # extensions
                pass
return resources
def get_actions(self):
"""Returns a list of ActionExtension objects."""
actions = []
for ext in self.extensions.values():
try:
actions.extend(ext.get_actions())
            except AttributeError:
                # NOTE(dprince): Extensions aren't required to have action
                # extensions
                pass
return actions
def get_request_extensions(self):
"""Returns a list of RequestExtension objects."""
request_exts = []
for ext in self.extensions.values():
try:
request_exts.extend(ext.get_request_extensions())
            except AttributeError:
                # NOTE(dprince): Extensions aren't required to have request
                # extensions
                pass
return request_exts
def extend_resources(self, version, attr_map):
"""Extend resources with additional resources or attributes.
:param: attr_map, the existing mapping from resource name to
attrs definition.
After this function, we will extend the attr_map if an extension
wants to extend this map.
"""
update_exts = []
processed_exts = set()
exts_to_process = self.extensions.copy()
        # Iterate while there are unprocessed extensions, bailing out if no
        # progress is made during a whole iteration
while exts_to_process:
processed_ext_count = len(processed_exts)
for ext_name, ext in list(exts_to_process.items()):
if not hasattr(ext, 'get_extended_resources'):
del exts_to_process[ext_name]
continue
if hasattr(ext, 'update_attributes_map'):
update_exts.append(ext)
if hasattr(ext, 'get_required_extensions'):
# Process extension only if all required extensions
# have been processed already
required_exts_set = set(ext.get_required_extensions())
if required_exts_set - processed_exts:
continue
try:
extended_attrs = ext.get_extended_resources(version)
for res, resource_attrs in six.iteritems(extended_attrs):
attr_map.setdefault(res, {}).update(resource_attrs)
except AttributeError:
LOG.exception(_LE("Error fetching extended attributes for "
"extension '%s'"), ext.get_name())
processed_exts.add(ext_name)
del exts_to_process[ext_name]
if len(processed_exts) == processed_ext_count:
# Exit loop as no progress was made
break
if exts_to_process:
# NOTE(salv-orlando): Consider whether this error should be fatal
LOG.error(_LE("It was impossible to process the following "
"extensions: %s because of missing requirements."),
','.join(exts_to_process.keys()))
# Extending extensions' attributes map.
for ext in update_exts:
ext.update_attributes_map(attr_map)
def _check_extension(self, extension):
"""Checks for required methods in extension objects."""
try:
LOG.debug('Ext name: %s', extension.get_name())
LOG.debug('Ext alias: %s', extension.get_alias())
LOG.debug('Ext description: %s', extension.get_description())
LOG.debug('Ext updated: %s', extension.get_updated())
except AttributeError as ex:
LOG.exception(_LE("Exception loading extension: %s"),
six.text_type(ex))
return False
return True
def _load_all_extensions(self):
"""Load extensions from the configured path.
The extension name is constructed from the module_name. If your
extension module is named widgets.py, the extension class within that
module should be 'Widgets'.
See tests/unit/extensions/foxinsocks.py for an example extension
implementation.
"""
for path in self.path.split(':'):
if os.path.exists(path):
self._load_all_extensions_from_path(path)
else:
LOG.error(_LE("Extension path '%s' doesn't exist!"), path)
def _load_all_extensions_from_path(self, path):
# Sorting the extension list makes the order in which they
# are loaded predictable across a cluster of load-balanced
# Neutron Servers
for f in sorted(os.listdir(path)):
try:
LOG.debug('Loading extension file: %s', f)
mod_name, file_ext = os.path.splitext(os.path.split(f)[-1])
ext_path = os.path.join(path, f)
if file_ext.lower() == '.py' and not mod_name.startswith('_'):
mod = imp.load_source(mod_name, ext_path)
ext_name = mod_name[0].upper() + mod_name[1:]
new_ext_class = getattr(mod, ext_name, None)
if not new_ext_class:
LOG.warn(_LW('Did not find expected name '
'"%(ext_name)s" in %(file)s'),
{'ext_name': ext_name,
'file': ext_path})
continue
new_ext = new_ext_class()
self.add_extension(new_ext)
except Exception as exception:
LOG.warn(_LW("Extension file %(f)s wasn't loaded due to "
"%(exception)s"),
{'f': f, 'exception': exception})
def add_extension(self, ext):
# Do nothing if the extension doesn't check out
if not self._check_extension(ext):
return
alias = ext.get_alias()
LOG.info(_LI('Loaded extension: %s'), alias)
if alias in self.extensions:
raise exceptions.DuplicatedExtension(alias=alias)
self.extensions[alias] = ext
class PluginAwareExtensionManager(ExtensionManager):
_instance = None
def __init__(self, path, plugins):
self.plugins = plugins
super(PluginAwareExtensionManager, self).__init__(path)
self.check_if_plugin_extensions_loaded()
def _check_extension(self, extension):
"""Check if an extension is supported by any plugin."""
extension_is_valid = super(PluginAwareExtensionManager,
self)._check_extension(extension)
return (extension_is_valid and
self._plugins_support(extension) and
self._plugins_implement_interface(extension))
def _plugins_support(self, extension):
alias = extension.get_alias()
supports_extension = alias in self.get_supported_extension_aliases()
if not supports_extension:
LOG.warn(_LW("Extension %s not supported by any of loaded "
"plugins"),
alias)
return supports_extension
def _plugins_implement_interface(self, extension):
if(not hasattr(extension, "get_plugin_interface") or
extension.get_plugin_interface() is None):
return True
for plugin in self.plugins.values():
if isinstance(plugin, extension.get_plugin_interface()):
return True
LOG.warn(_LW("Loaded plugins do not implement extension %s interface"),
extension.get_alias())
return False
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls(get_extensions_path(),
manager.NeutronManager.get_service_plugins())
return cls._instance
def get_supported_extension_aliases(self):
"""Gets extension aliases supported by all plugins."""
aliases = set()
for plugin in self.plugins.values():
# we also check all classes that the plugins inherit to see if they
# directly provide support for an extension
for item in [plugin] + plugin.__class__.mro():
try:
aliases |= set(
getattr(item, "supported_extension_aliases", []))
except TypeError:
# we land here if a class has an @property decorator for
# supported extension aliases. They only work on objects.
pass
return aliases
def check_if_plugin_extensions_loaded(self):
"""Check if an extension supported by a plugin has been loaded."""
plugin_extensions = self.get_supported_extension_aliases()
missing_aliases = plugin_extensions - set(self.extensions)
if missing_aliases:
raise exceptions.ExtensionsNotFound(
extensions=list(missing_aliases))
class RequestExtension(object):
"""Extend requests and responses of core Neutron OpenStack API controllers.
Provide a way to add data to responses and handle custom request data
that is sent to core Neutron OpenStack API controllers.
"""
def __init__(self, method, url_route, handler):
self.url_route = url_route
self.handler = handler
self.conditions = dict(method=[method])
self.key = "%s-%s" % (method, url_route)
class ActionExtension(object):
"""Add custom actions to core Neutron OpenStack API controllers."""
def __init__(self, collection, action_name, handler):
self.collection = collection
self.action_name = action_name
self.handler = handler
class ResourceExtension(object):
"""Add top level resources to the OpenStack API in Neutron."""
def __init__(self, collection, controller, parent=None, path_prefix="",
collection_actions={}, member_actions={}, attr_map={}):
self.collection = collection
self.controller = controller
self.parent = parent
self.collection_actions = collection_actions
self.member_actions = member_actions
self.path_prefix = path_prefix
self.attr_map = attr_map
# Returns the extension paths from a config entry and the __path__
# of neutron.extensions
def get_extensions_path():
paths = neutron.extensions.__path__
neutron_mods = repos.NeutronModules()
for x in neutron_mods.installed_list():
try:
paths += neutron_mods.module(x).extensions.__path__
except AttributeError:
# Occurs normally if module has no extensions sub-module
pass
if cfg.CONF.api_extensions_path:
paths.append(cfg.CONF.api_extensions_path)
# If the path has dups in it, from discovery + conf file, the duplicate
# import of the same module and super() do not play nicely, so weed
# out the duplicates, preserving search order.
z = collections.OrderedDict()
for x in paths:
z[x] = 1
paths = z.keys()
LOG.debug("get_extension_paths = %s", paths)
path = ':'.join(paths)
return path
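
# Editor's note (illustration only): the OrderedDict trick above removes
# duplicates while preserving search order, e.g.
#
#     list(collections.OrderedDict((x, 1) for x in ['a', 'b', 'a']).keys())
#     # -> ['a', 'b']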
def append_api_extensions_path(paths):
paths = list(set([cfg.CONF.api_extensions_path] + paths))
cfg.CONF.set_override('api_extensions_path',
':'.join([p for p in paths if p]))
| {
"content_hash": "f52699eded6e58b32cbb76b64cb8e3c5",
"timestamp": "",
"source": "github",
"line_count": 666,
"max_line_length": 79,
"avg_line_length": 37.68318318318318,
"alnum_prop": 0.5848109335777184,
"repo_name": "javaos74/neutron",
"id": "1246087f90d820f014ddba1c0e8e375dcfc373a4",
"size": "25772",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutron/api/extensions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "7825979"
},
{
"name": "Shell",
"bytes": "13865"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class RoughnessValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="roughness", parent_name="isosurface.lighting", **kwargs
):
super(RoughnessValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 1),
min=kwargs.pop("min", 0),
**kwargs,
)
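
# Editor's sketch (assumes plotly is installed): this validator backs the
# isosurface.lighting.roughness property, accepting only numbers in [0, 1].
#
#     import plotly.graph_objects as go
#     fig = go.Figure(go.Isosurface(lighting=dict(roughness=0.5)))
#     fig.data[0].lighting.roughness = 2  # rejected: raises ValueError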
| {
"content_hash": "efb115823c4aa0a523dd95acde3731b6",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 82,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.5861386138613861,
"repo_name": "plotly/plotly.py",
"id": "c16be5b024d04a5d85a08532dab58eeb5efa63a0",
"size": "505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/isosurface/lighting/_roughness.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import collections
import os
import shutil
import subprocess
import tempfile
import xml.sax.handler
import xml.sax.saxutils
import xml.sax.xmlreader
def transform(source, layers):
filename_layers_visible = collections.defaultdict(lambda: [False] * len(layers))
for index, layer in enumerate(layers):
if layer.png_fn:
filename_layers_visible[layer.png_fn][index] = True
with tempfile.TemporaryDirectory() as dirname:
rotated = os.path.join(dirname, 'rotated.svg')
cut_ = os.path.join(dirname, 'cut.svg')
rotate(source, layers, rotated)
for destination, layers_visible in filename_layers_visible.items():
cut(rotated, layers_visible, cut_)
pngize(cut_, destination)
def rotate(source, layers, destination):
with open(destination, 'wb') as destination_file:
parser = xml.sax.make_parser()
parser.setContentHandler(RotatedRootGroupsXMLGenerator(layers, destination_file))
parser.setFeature(xml.sax.handler.feature_namespaces, True)
parser.parse(source)
def cut(source, layers_visible, destination):
args = ['inkscape', '--verb=LayerHideAll'] + ['--verb=LayerPrev'] * len(layers_visible)
for visible in layers_visible:
if visible:
args.append('--verb=LayerToggleHide')
args.append('--verb=LayerNext')
args += ['--verb=FitCanvasToDrawing', '--verb=FileSave', '--verb=FileQuit', destination]
shutil.copy2(source, destination)
subprocess.check_call(args)
def pngize(source, destination):
subprocess.check_call(['inkscape', '--export-png={}'.format(destination), source])
class Transformations:
def __init__(self, png_fn=None, rotation=0):
self.png_fn = png_fn
self.rotation = rotation
class RotatedRootGroupsXMLGenerator(xml.sax.saxutils.XMLGenerator):
ELEMENT_NAME = 'http://www.w3.org/2000/svg', 'g'
ATTRIBUTE_NAME = (None, 'transform')
ATTRIBUTE_QNAME = 'transform'
ATTRIBUTE_VALUE = 'rotate({0.rotation})'
def __init__(self, root_groups, out):
super().__init__(out, 'utf-8', short_empty_elements=True)
self.root_group_index = -1
self.groups_depth = 0
self.root_groups = root_groups
@classmethod
def is_group(cls, name):
return name == cls.ELEMENT_NAME
@classmethod
def make_attributes(cls, group):
value = cls.ATTRIBUTE_VALUE.format(group)
attrs = {cls.ATTRIBUTE_NAME: value}
qnames = {cls.ATTRIBUTE_NAME: cls.ATTRIBUTE_QNAME}
return xml.sax.xmlreader.AttributesNSImpl(attrs, qnames)
def startElementNS(self, name, qname, attrs):
super().startElementNS(name, qname, attrs)
if self.is_group(name):
if not self.groups_depth:
self.root_group_index += 1
attrs_ = self.make_attributes(self.root_groups[self.root_group_index])
super().startElementNS(name, qname, attrs_)
self.groups_depth += 1
def endElementNS(self, name, qname):
if self.is_group(name):
self.groups_depth -= 1
if not self.groups_depth:
super().endElementNS(name, qname)
super().endElementNS(name, qname)
if __name__ == '__main__':
alitrunk = Transformations('body.png')
alitrunk_stripes = Transformations('body.png')
antenna_left = Transformations()
antenna_right = Transformations('antenna.png', -40)
eye_left = Transformations('body.png')
eye_right = Transformations('body.png')
femur_left_hind = Transformations()
femur_left_middle = Transformations()
femur_left_front = Transformations()
femur_right_front = Transformations()
femur_right_hind = Transformations()
femur_right_middle = Transformations('femur.png', -58)
head = Transformations('body.png')
gaster = Transformations('body.png')
tibia_left_hind = Transformations('tibia_left_hind.png')
tibia_left_middle = Transformations('tibia_left_middle.png')
tibia_left_front = Transformations()
tibia_right_front = Transformations('tibia_front.png', -135)
tibia_right_hind = Transformations('tibia_right_hind.png')
tibia_right_middle = Transformations('tibia_right_middle.png')
transform('ant.svg', [
femur_right_middle,
tibia_right_middle,
femur_right_hind,
tibia_right_hind,
antenna_right,
antenna_left,
gaster,
femur_left_middle,
tibia_left_middle,
femur_left_hind,
tibia_left_hind,
femur_right_front,
tibia_right_front,
alitrunk,
alitrunk_stripes,
        eye_right,
        head,
eye_left,
femur_left_front,
tibia_left_front])
| {
"content_hash": "bd1932bd535fb0ffd7d692ce989b09b3",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 92,
"avg_line_length": 33.42253521126761,
"alnum_prop": 0.6451748841129372,
"repo_name": "kosova-holy/Ant",
"id": "5bd32abf467ace6b6f407d98349cda7db9dd8103",
"size": "5369",
"binary": false,
"copies": "1",
"ref": "refs/heads/wip",
"path": "build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10250"
}
],
"symlink_target": ""
} |
import netaddr
import netaddr.core as netexc
from oslo.config import cfg
import six
from webob import exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
import nova.network
from nova.openstack.common import log as logging
from nova import quota
CONF = cfg.CONF
CONF.import_opt('enable_network_quota',
'nova.api.openstack.compute.contrib.os_tenant_networks')
CONF.import_opt('use_neutron_default_nets',
'nova.api.openstack.compute.contrib.os_tenant_networks')
CONF.import_opt('neutron_default_tenant_id',
'nova.api.openstack.compute.contrib.os_tenant_networks')
CONF.import_opt('quota_networks',
'nova.api.openstack.compute.contrib.os_tenant_networks')
ALIAS = 'os-tenant-networks'
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def network_dict(network):
return {"id": network.get("uuid") or network.get("id"),
"cidr": str(network.get("cidr")),
"label": network.get("label")}
class TenantNetworkController(object):
def __init__(self, network_api=None):
self.network_api = nova.network.API()
self._default_networks = []
def _refresh_default_networks(self):
self._default_networks = []
if CONF.use_neutron_default_nets == "True":
try:
self._default_networks = self._get_default_networks()
except Exception:
LOG.exception(_LE("Failed to get default networks"))
def _get_default_networks(self):
project_id = CONF.neutron_default_tenant_id
ctx = nova_context.RequestContext(user_id=None,
project_id=project_id)
networks = {}
for n in self.network_api.get_all(ctx):
networks[n['id']] = n['label']
return [{'id': k, 'label': v} for k, v in networks.iteritems()]
@extensions.expected_errors(())
def index(self, req):
context = req.environ['nova.context']
authorize(context)
networks = list(self.network_api.get_all(context))
if not self._default_networks:
self._refresh_default_networks()
networks.extend(self._default_networks)
return {'networks': [network_dict(n) for n in networks]}
@extensions.expected_errors(404)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
network = self.network_api.get(context, id)
except exception.NetworkNotFound:
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
return {'network': network_dict(network)}
@extensions.expected_errors((403, 404, 409))
@wsgi.response(202)
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
reservation = None
try:
if CONF.enable_network_quota:
reservation = QUOTAS.reserve(context, networks=-1)
except Exception:
reservation = None
LOG.exception(_LE("Failed to update usages deallocating "
"network."))
def _rollback_quota(reservation):
if CONF.enable_network_quota and reservation:
QUOTAS.rollback(context, reservation)
try:
self.network_api.delete(context, id)
except exception.PolicyNotAuthorized as e:
_rollback_quota(reservation)
raise exc.HTTPForbidden(explanation=six.text_type(e))
except exception.NetworkInUse as e:
_rollback_quota(reservation)
raise exc.HTTPConflict(explanation=e.format_message())
except exception.NetworkNotFound:
_rollback_quota(reservation)
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
if CONF.enable_network_quota and reservation:
QUOTAS.commit(context, reservation)
@extensions.expected_errors((400, 403, 503))
def create(self, req, body):
if not body:
_msg = _("Missing request body")
raise exc.HTTPBadRequest(explanation=_msg)
context = req.environ["nova.context"]
authorize(context)
network = body["network"]
keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
"num_networks"]
kwargs = dict((k, network.get(k)) for k in keys)
label = network["label"]
if not (kwargs["cidr"] or kwargs["cidr_v6"]):
msg = _("No CIDR requested")
raise exc.HTTPBadRequest(explanation=msg)
if kwargs["cidr"]:
try:
net = netaddr.IPNetwork(kwargs["cidr"])
if net.size < 4:
msg = _("Requested network does not contain "
"enough (2+) usable hosts")
raise exc.HTTPBadRequest(explanation=msg)
except netexc.AddrFormatError:
msg = _("CIDR is malformed.")
raise exc.HTTPBadRequest(explanation=msg)
except netexc.AddrConversionError:
msg = _("Address could not be converted.")
raise exc.HTTPBadRequest(explanation=msg)
        networks = []
        reservation = None
try:
if CONF.enable_network_quota:
reservation = QUOTAS.reserve(context, networks=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many networks.")
raise exc.HTTPBadRequest(explanation=msg)
try:
networks = self.network_api.create(context,
label=label, **kwargs)
if CONF.enable_network_quota:
QUOTAS.commit(context, reservation)
except exception.PolicyNotAuthorized as e:
raise exc.HTTPForbidden(explanation=six.text_type(e))
except Exception:
if CONF.enable_network_quota:
QUOTAS.rollback(context, reservation)
msg = _("Create networks failed")
LOG.exception(msg, extra=network)
raise exc.HTTPServiceUnavailable(explanation=msg)
return {"network": network_dict(networks[0])}
class TenantNetworks(extensions.V3APIExtensionBase):
"""Tenant-based Network Management Extension."""
name = "TenantNetworks"
alias = ALIAS
version = 1
def get_resources(self):
ext = extensions.ResourceExtension(ALIAS, TenantNetworkController())
return [ext]
def get_controller_extensions(self):
return []
def _sync_networks(context, project_id, session):
ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
ctx = ctx.elevated()
networks = nova.network.api.API().get_all(ctx)
return dict(networks=len(networks))
if CONF.enable_network_quota:
QUOTAS.register_resource(quota.ReservableResource('networks',
_sync_networks,
'quota_networks'))
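# A minimal usage sketch of the API this plugin exposes (the path and payload
# values below are illustrative, not taken from this module):
#
#   POST /os-tenant-networks
#   {"network": {"label": "web", "cidr": "10.0.0.0/24"}}
#
# On success, create() responds with {"network": {...}} in the shape produced
# by network_dict() above; index() returns {"networks": [...]}, including any
# Neutron default networks when use_neutron_default_nets is enabled.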
| {
"content_hash": "bd376c1550a4da8782642a8af2a9d0cc",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 76,
"avg_line_length": 36.308457711442784,
"alnum_prop": 0.5976979994519046,
"repo_name": "badock/nova",
"id": "90d3c29b730ddfd6300251cc7361b5695d0b946a",
"size": "7935",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/plugins/v3/tenant_networks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groff",
"bytes": "112"
},
{
"name": "PLpgSQL",
"bytes": "2958"
},
{
"name": "Python",
"bytes": "15441440"
},
{
"name": "Shell",
"bytes": "20796"
},
{
"name": "Smarty",
"bytes": "693857"
}
],
"symlink_target": ""
} |
"""Objects relating to sourcing secrets from AWS Secrets Manager"""
from typing import Optional
import boto3
from cached_property import cached_property
from airflow.secrets import BaseSecretsBackend
from airflow.utils.log.logging_mixin import LoggingMixin
class SecretsManagerBackend(BaseSecretsBackend, LoggingMixin):
"""
Retrieves Connection or Variables from AWS Secrets Manager
Configurable via ``airflow.cfg`` like so:
.. code-block:: ini
[secrets]
backend = airflow.providers.amazon.aws.secrets.secrets_manager.SecretsManagerBackend
backend_kwargs = {"connections_prefix": "airflow/connections"}
For example, if secrets prefix is ``airflow/connections/smtp_default``, this would be accessible
if you provide ``{"connections_prefix": "airflow/connections"}`` and request conn_id ``smtp_default``.
If variables prefix is ``airflow/variables/hello``, this would be accessible
if you provide ``{"variables_prefix": "airflow/variables"}`` and request variable key ``hello``.
And if config_prefix is ``airflow/config/sql_alchemy_conn``, this would be accessible
if you provide ``{"config_prefix": "airflow/config"}`` and request config
key ``sql_alchemy_conn``.
You can also pass additional keyword arguments like ``aws_secret_access_key``, ``aws_access_key_id``
or ``region_name`` to this class and they would be passed on to Boto3 client.
:param connections_prefix: Specifies the prefix of the secret to read to get Connections.
If set to None (null), requests for connections will not be sent to AWS Secrets Manager
:type connections_prefix: str
:param variables_prefix: Specifies the prefix of the secret to read to get Variables.
If set to None (null), requests for variables will not be sent to AWS Secrets Manager
:type variables_prefix: str
    :param config_prefix: Specifies the prefix of the secret to read to get Configurations.
If set to None (null), requests for configurations will not be sent to AWS Secrets Manager
:type config_prefix: str
:param profile_name: The name of a profile to use. If not given, then the default profile is used.
:type profile_name: str
:param sep: separator used to concatenate secret_prefix and secret_id. Default: "/"
:type sep: str
"""
def __init__(
self,
connections_prefix: str = 'airflow/connections',
variables_prefix: str = 'airflow/variables',
config_prefix: str = 'airflow/config',
profile_name: Optional[str] = None,
sep: str = "/",
**kwargs,
):
super().__init__()
if connections_prefix is not None:
self.connections_prefix = connections_prefix.rstrip("/")
else:
self.connections_prefix = connections_prefix
if variables_prefix is not None:
self.variables_prefix = variables_prefix.rstrip('/')
else:
self.variables_prefix = variables_prefix
if config_prefix is not None:
self.config_prefix = config_prefix.rstrip('/')
else:
self.config_prefix = config_prefix
self.profile_name = profile_name
self.sep = sep
self.kwargs = kwargs
@cached_property
def client(self):
"""Create a Secrets Manager client"""
session = boto3.session.Session(
profile_name=self.profile_name,
)
return session.client(service_name="secretsmanager", **self.kwargs)
def get_conn_uri(self, conn_id: str) -> Optional[str]:
"""
Get Connection Value
:param conn_id: connection id
:type conn_id: str
"""
if self.connections_prefix is None:
return None
return self._get_secret(self.connections_prefix, conn_id)
def get_variable(self, key: str) -> Optional[str]:
"""
Get Airflow Variable
:param key: Variable Key
:return: Variable Value
"""
if self.variables_prefix is None:
return None
return self._get_secret(self.variables_prefix, key)
def get_config(self, key: str) -> Optional[str]:
"""
Get Airflow Configuration
:param key: Configuration Option Key
:return: Configuration Option Value
"""
if self.config_prefix is None:
return None
return self._get_secret(self.config_prefix, key)
def _get_secret(self, path_prefix: str, secret_id: str) -> Optional[str]:
"""
Get secret value from Secrets Manager
:param path_prefix: Prefix for the Path to get Secret
:type path_prefix: str
:param secret_id: Secret Key
:type secret_id: str
"""
secrets_path = self.build_path(path_prefix, secret_id, self.sep)
try:
response = self.client.get_secret_value(
SecretId=secrets_path,
)
return response.get('SecretString')
except self.client.exceptions.ResourceNotFoundException:
self.log.debug(
"An error occurred (ResourceNotFoundException) when calling the "
"get_secret_value operation: "
"Secret %s not found.",
secrets_path,
)
return None
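# A minimal local sketch of resolving a connection through this backend.
# Assumptions (not part of this module): boto3 can locate AWS credentials, and
# a secret named "airflow/connections/smtp_default" exists in Secrets Manager.
if __name__ == "__main__":
    backend = SecretsManagerBackend(connections_prefix="airflow/connections")
    # Builds the path "airflow/connections/smtp_default" and returns the
    # secret's SecretString, or None if the secret does not exist.
    print(backend.get_conn_uri(conn_id="smtp_default"))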
| {
"content_hash": "700e64f08d13c6189381daa7f60b1ee5",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 106,
"avg_line_length": 37.50704225352113,
"alnum_prop": 0.6370634622606083,
"repo_name": "airbnb/airflow",
"id": "37005e2ed0c27cb4097ad6d53c16d6c0c32a3774",
"size": "6113",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "airflow/providers/amazon/aws/secrets/secrets_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36374"
},
{
"name": "HTML",
"bytes": "99535"
},
{
"name": "JavaScript",
"bytes": "891618"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "796220"
},
{
"name": "Shell",
"bytes": "9040"
}
],
"symlink_target": ""
} |
import json
from pulsar.apps.test import test_timeout
from lux.utils import test
class TestSqlite(test.AppTestCase):
config_file = 'tests.auth'
config_params = {'DATASTORE': 'sqlite://'}
def test_backend(self):
backend = self.app.auth_backend
self.assertTrue(backend)
self.assertTrue(backend.backends)
def test_get_user_none(self):
backend = self.app.auth_backend
request = self.app.wsgi_request()
user = backend.get_user(request, user_id=18098098)
self.assertEqual(user, None)
user = backend.get_user(request, email='[email protected]')
self.assertEqual(user, None)
user = backend.get_user(request, username='dhvfvhsdfgvhfd')
self.assertEqual(user, None)
def test_create_user(self):
backend = self.app.auth_backend
request = self.app.wsgi_request()
user = backend.create_user(request,
username='pippo',
email='[email protected]',
password='pluto',
first_name='Pippo')
self.assertTrue(user.id)
self.assertEqual(user.first_name, 'Pippo')
self.assertFalse(user.is_superuser())
self.assertFalse(user.is_active())
# make it active
with self.app.odm().begin() as session:
user.active = True
session.add(user)
self.assertTrue(user.is_active())
def test_create_superuser(self):
backend = self.app.auth_backend
request = self.app.wsgi_request()
user = backend.create_superuser(request,
username='foo',
email='[email protected]',
password='pluto',
first_name='Foo')
self.assertTrue(user.id)
self.assertEqual(user.first_name, 'Foo')
self.assertTrue(user.is_superuser())
self.assertTrue(user.is_active())
def test_get(self):
request = self.client.get('/')
response = request.response
self.assertEqual(response.status_code, 200)
user = request.cache.user
self.assertFalse(user.is_authenticated())
def test_authorizations(self):
request = self.client.get('/authorizations')
response = request.response
self.assertEqual(response.status_code, 401)
user = request.cache.user
self.assertFalse(user.is_authenticated())
def test_login_fail(self):
data = {'username': 'jdshvsjhvcsd',
'password': 'dksjhvckjsahdvsf'}
request = self.client.post('/authorizations',
content_type='application/json',
body=data)
response = request.response
self.assertEqual(response.status_code, 200)
user = request.cache.user
self.assertFalse(user.is_authenticated())
self.assertEqual(response['content-type'],
'application/json; charset=utf-8')
def test_create_superuser_command_and_token(self):
username = 'ghghghgh'
password = 'dfbjdhbvdjbhv'
user = self.client.run_command('create_superuser',
['--username', username,
'--email', '[email protected]',
'--password', password])
self.assertEqual(user.username, username)
self.assertNotEqual(user.password, password)
# Get new token
request = self.client.post('/authorizations',
content_type='application/json',
body={'username': username,
'password': password})
response = request.response
self.assertEqual(response.status_code, 201)
user = request.cache.user
self.assertFalse(user.is_authenticated())
self.assertEqual(response['content-type'],
'application/json; charset=utf-8')
data = json.loads(response.content[0].decode('utf-8'))
self.assertTrue('token' in data)
| {
"content_hash": "0e725d3dcc052da0093b4c603fc06993",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 77,
"avg_line_length": 38.44642857142857,
"alnum_prop": 0.5496980956804459,
"repo_name": "tazo90/lux",
"id": "c9a1091b4c86ced68fdea88459dc592939e27ced",
"size": "4306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/auth/sqlite.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85029"
},
{
"name": "HTML",
"bytes": "17331"
},
{
"name": "JavaScript",
"bytes": "354892"
},
{
"name": "Python",
"bytes": "543161"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from dataclasses import dataclass
from typing import Any
from pants.bsp.spec.base import BuildTargetIdentifier
# -----------------------------------------------------------------------------------------------
# Compile Request
# See https://build-server-protocol.github.io/docs/specification.html#compile-request
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class CompileParams:
# A sequence of build targets to compile.
targets: tuple[BuildTargetIdentifier, ...]
# A unique identifier generated by the client to identify this request.
# The server may include this id in triggered notifications or responses.
origin_id: str | None = None
# Optional arguments to the compilation process.
arguments: tuple[str, ...] | None = ()
@classmethod
def from_json_dict(cls, d: dict[str, Any]) -> Any:
return cls(
targets=tuple(BuildTargetIdentifier.from_json_dict(x) for x in d["targets"]),
origin_id=d.get("originId"),
arguments=tuple(d["arguments"]) if "arguments" in d else None,
)
def to_json_dict(self) -> dict[str, Any]:
result: dict[str, Any] = {"targets": [tgt.to_json_dict() for tgt in self.targets]}
if self.origin_id is not None:
result["originId"] = self.origin_id
if self.arguments is not None:
result["arguments"] = self.arguments
return result
@dataclass(frozen=True)
class CompileResult:
# An optional request id to know the origin of this report.
origin_id: str | None
# A status code for the execution.
status_code: int
# Kind of data to expect in the `data` field. If this field is not set, the kind of data is not specified.
data_kind: str | None = None
# A field containing language-specific information, like products
# of compilation or compiler-specific metadata the client needs to know.
data: Any | None = None
@classmethod
def from_json_dict(cls, d: dict[str, Any]) -> Any:
return cls(
origin_id=d.get("originId"),
status_code=d["statusCode"],
data_kind=d.get("dataKind"),
data=d.get("data"),
)
def to_json_dict(self) -> dict[str, Any]:
result: dict[str, Any] = {
"statusCode": self.status_code,
}
if self.origin_id is not None:
result["originId"] = self.origin_id
if self.data_kind is not None:
result["dataKind"] = self.data_kind
if self.data is not None:
result["data"] = self.data # TODO: Enforce to_json_dict available
return result
@dataclass(frozen=True)
class CompileTask:
target: BuildTargetIdentifier
@classmethod
def from_json_dict(cls, d: dict[str, Any]) -> Any:
return cls(target=BuildTargetIdentifier.from_json_dict(d["target"]))
def to_json_dict(self) -> dict[str, Any]:
return {"target": self.target.to_json_dict()}
@dataclass(frozen=True)
class CompileReport:
# The build target that was compiled
target: BuildTargetIdentifier
# An optional request id to know the origin of this report.
origin_id: str | None
# The total number of reported errors compiling this target.
errors: int
# The total number of reported warnings compiling the target.
warnings: int
# The total number of milliseconds it took to compile the target.
time: int | None = None
# The compilation was a noOp compilation.
no_op: bool | None = None
@classmethod
def from_json_dict(cls, d: dict[str, Any]) -> Any:
return cls(
target=BuildTargetIdentifier.from_json_dict(d["target"]),
origin_id=d.get("originId"),
errors=d["errors"],
warnings=d["warnings"],
time=d.get("time"),
no_op=d.get("noOp"),
)
def to_json_dict(self) -> dict[str, Any]:
result = {
"target": self.target.to_json_dict(),
"errors": self.errors,
"warnings": self.warnings,
}
if self.origin_id is not None:
result["originId"] = self.origin_id
if self.time is not None:
result["time"] = self.time
if self.no_op is not None:
result["noOp"] = self.no_op
return result
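# A minimal round-trip sketch of the wire format handled above (the target
# URI and origin id are made-up values):
if __name__ == "__main__":
    params = CompileParams.from_json_dict(
        {
            "targets": [{"uri": "pants://src/python/example:example"}],
            "originId": "origin-1",
        }
    )
    # to_json_dict() reproduces the wire shape, omitting optional fields that
    # were absent from the input (here, "arguments").
    encoded = params.to_json_dict()
    assert encoded["originId"] == "origin-1"
    assert "arguments" not in encoded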
| {
"content_hash": "9295d6543826bea95672ced08d8edff3",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 110,
"avg_line_length": 32.81481481481482,
"alnum_prop": 0.5918735891647856,
"repo_name": "benjyw/pants",
"id": "7c080f24eb20231ba5056d595ad7fe81843f579b",
"size": "4561",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "src/python/pants/bsp/spec/compile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "10690"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3595"
},
{
"name": "Python",
"bytes": "7135320"
},
{
"name": "Rust",
"bytes": "1601736"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31723"
},
{
"name": "Starlark",
"bytes": "72809"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from mtr.sync.lib.helpers import column_name, column_index, \
model_attributes, process_attribute, cell_value
from mtr.sync.tests import SyncTestMixin
from mtr.sync.lib.processors import csv
from ...models import Person, Office, Tag
class HelpersTest(SyncTestMixin, TestCase):
MODEL = Person
RELATED_MODEL = Office
RELATED_MANY = Tag
PROCESSOR = csv.CsvProcessor
def test_column_index_value_transform(self):
self.assertEqual(column_name(0), 'A')
self.assertEqual(column_name(25), 'Z')
self.assertEqual(column_index(10), 10)
self.assertEqual(column_index('A'), 0)
def test_row_values_from_col(self):
cols = ['1', '2', '3', '4', '5']
self.assertEqual(cell_value(cols, 0), cols[0])
self.assertEqual(cell_value(cols, 'A'), cols[0])
self.assertEqual(cell_value(cols, '4'), cols[4])
self.assertEqual(cell_value(cols, 'A-C'), cols[:3])
self.assertEqual(cell_value(cols, 'A-C,B'), cols[:3] + [cols[1]])
self.assertEqual(
cell_value(cols, 'A,B,D|'), ' '.join(cols[:2] + [cols[3]]))
self.assertEqual(
cell_value(cols, 'A-D,A-F,B|'), ' '.join(
cols[:4] + cols[:5] + [cols[1]]))
self.assertEqual(
cell_value(cols, 'A-D,B|+A-D,B|'), [
' '.join(cols[:4] + [cols[1]]),
' '.join(cols[:4] + [cols[1]])
])
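    # The spec mini-language exercised above (as inferred from the
    # assertions): comma-separated column letters/indexes and "A-C" style
    # ranges select values; a trailing "|" joins a group with spaces, and
    # "+" separates several independently-joined groups.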
def test_model_attributes(self):
fields = model_attributes(self.settings)
fields = [f[0] for f in fields]
self.assertEqual([
'id', 'name', 'surname', 'gender', 'security_level',
'office|_fk_|id', 'office|_fk_|office', 'office|_fk_|address',
'tags|_m_|id', 'tags|_m_|name',
'custom_method', 'none_param'], fields)
def test_process_attribute(self):
self.assertEqual(
process_attribute(
self.instance, 'name'), self.instance.name)
self.assertEqual(
process_attribute(
self.instance, 'office|_fk_|address'),
self.instance.office.address)
self.assertEqual(
process_attribute(
self.instance, 'notexist'), None)
self.assertEqual(
process_attribute(
self.instance, 'office|_fk_|notexist|_fk_|attr'), None)
| {
"content_hash": "16383c8e1c542b10d21c92c5e45228ed",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 74,
"avg_line_length": 35.74626865671642,
"alnum_prop": 0.5620041753653444,
"repo_name": "mtrgroup/django-mtr-sync",
"id": "f1bb941162d89b68bc3886bd811d3f228918b077",
"size": "2395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/app/tests/lib/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "192118"
},
{
"name": "Python",
"bytes": "137393"
}
],
"symlink_target": ""
} |
from csc.nl.euro import StemmedEuroNL
def NL():
return StemmedEuroNL('hu')
| {
"content_hash": "ea82877fef2cca792252d29a379bfd84",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 37,
"avg_line_length": 20,
"alnum_prop": 0.725,
"repo_name": "pbarton666/buzz_bot",
"id": "39784ace7eedad0c2ba8151b1b12257bdc2a921b",
"size": "80",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangoproj/djangoapp/csc/nl/hu/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "10587"
},
{
"name": "CSS",
"bytes": "36405"
},
{
"name": "Genshi",
"bytes": "61664"
},
{
"name": "Groff",
"bytes": "3300"
},
{
"name": "HTML",
"bytes": "201497"
},
{
"name": "JavaScript",
"bytes": "39255"
},
{
"name": "Makefile",
"bytes": "200"
},
{
"name": "Python",
"bytes": "10629713"
},
{
"name": "Ruby",
"bytes": "12049"
}
],
"symlink_target": ""
} |
"""
Build a combined news file from news fragments.
"""
from __future__ import absolute_import, division, print_function
import os
import click
import sys
from datetime import date
from ._settings import load_config_from_options, ConfigError
from ._builder import find_fragments, split_fragments, render_fragments
from ._project import get_version, get_project_name
from ._writer import append_to_newsfile
from ._git import remove_files, stage_newsfile
def _get_date():
return date.today().isoformat()
@click.command(name="build")
@click.option(
"--draft",
"draft",
default=False,
flag_value=True,
help="Render the news fragments, don't write to files, don't check versions.",
)
@click.option("--config", "config_file", default=None, help="Configuration file name.")
@click.option("--dir", "directory", default=None)
@click.option("--name", "project_name", default=None)
@click.option(
"--version",
"project_version",
default=None,
help="Render the news fragments using given version.",
)
@click.option("--date", "project_date", default=None)
@click.option(
"--yes",
"answer_yes",
default=False,
flag_value=True,
help="Do not ask for confirmation to remove news fragments.",
)
def _main(
draft,
directory,
config_file,
project_name,
project_version,
project_date,
answer_yes,
):
try:
return __main(
draft,
directory,
config_file,
project_name,
project_version,
project_date,
answer_yes,
)
except ConfigError as e:
print(e, file=sys.stderr)
sys.exit(1)
def __main(
draft,
directory,
config_file,
project_name,
project_version,
project_date,
answer_yes,
):
"""
The main entry point.
"""
base_directory, config = load_config_from_options(directory, config_file)
to_err = draft
click.echo("Loading template...", err=to_err)
with open(config["template"], "rb") as tmpl:
template = tmpl.read().decode("utf8")
click.echo("Finding news fragments...", err=to_err)
definitions = config["types"]
if config.get("directory"):
fragment_base_directory = os.path.abspath(config["directory"])
fragment_directory = None
else:
fragment_base_directory = os.path.abspath(
os.path.join(base_directory, config["package_dir"], config["package"])
)
fragment_directory = "newsfragments"
fragments, fragment_filenames = find_fragments(
fragment_base_directory, config["sections"], fragment_directory, definitions
)
click.echo("Rendering news fragments...", err=to_err)
fragments = split_fragments(
fragments, definitions, all_bullets=config["all_bullets"]
)
if project_version is None:
project_version = config.get('version')
if project_version is None:
project_version = get_version(
os.path.join(base_directory, config["package_dir"]), config["package"]
).strip()
if project_name is None:
project_name = config.get('name')
if not project_name:
package = config.get("package")
if package:
project_name = get_project_name(
os.path.abspath(os.path.join(base_directory, config["package_dir"])),
package,
)
else:
# Can't determine a project_name, but maybe it is not needed.
project_name = ""
if project_date is None:
project_date = _get_date().strip()
if config["title_format"]:
top_line = config["title_format"].format(
name=project_name, version=project_version, project_date=project_date
)
else:
top_line = ""
rendered = render_fragments(
# The 0th underline is used for the top line
template,
config["issue_format"],
top_line,
fragments,
definitions,
config["underlines"][1:],
config["wrap"],
{"name": project_name, "version": project_version, "date": project_date},
top_underline=config["underlines"][0],
all_bullets=config["all_bullets"],
)
if draft:
click.echo(
"Draft only -- nothing has been written.\n"
"What is seen below is what would be written.\n",
err=to_err,
)
click.echo(rendered)
else:
click.echo("Writing to newsfile...", err=to_err)
start_string = config["start_string"]
news_file = config["filename"]
if config["single_file"]:
# When single_file is enabled, the news file name changes based on the version.
news_file = news_file.format(
name=project_name, version=project_version, project_date=project_date
)
append_to_newsfile(
base_directory,
news_file,
start_string,
top_line,
rendered,
single_file=config["single_file"],
)
click.echo("Staging newsfile...", err=to_err)
stage_newsfile(base_directory, news_file)
click.echo("Removing news fragments...", err=to_err)
remove_files(fragment_filenames, answer_yes)
click.echo("Done!", err=to_err)
if __name__ == "__main__": # pragma: no cover
_main()
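# Example invocations (assuming the usual ``towncrier`` console entry point;
# values are illustrative):
#
#   towncrier build --draft                  # render to stdout, write nothing
#   towncrier build --version 1.2.3 --yes    # write newsfile, remove fragments
#
# --draft skips all file writes and fragment removal; --yes suppresses the
# confirmation prompt before news fragments are deleted.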
| {
"content_hash": "d1cb1ac8ad3584b95b54f70762a07ec9",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 91,
"avg_line_length": 27.984615384615385,
"alnum_prop": 0.5924500641378047,
"repo_name": "hawkowl/towncrier",
"id": "2f02eec222dcae52d7c4212f301cdb1f043e5e23",
"size": "5519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/towncrier/build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "1157"
},
{
"name": "Python",
"bytes": "75702"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import IntegrityError, models, transaction
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar
class Migration(DataMigration):
# Flag to indicate if this migration is too risky
# to run online and needs to be coordinated for offline
is_dangerous = True
def forwards(self, orm):
db.commit_transaction()
try:
self._forwards(orm)
except Exception:
db.start_transaction()
raise
db.start_transaction()
def _forwards(self, orm):
"Write your forwards methods here."
projects = orm.Project.objects.all().select_related('team')
for project in RangeQuerySetWrapperWithProgressBar(projects):
try:
with transaction.atomic():
orm.ProjectTeam.objects.create(
project=project,
team=project.team,
)
except IntegrityError:
pass
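                # Duplicate (project, team) pairs are expected when the
                # backfill is re-run; the IntegrityError is swallowed and
                # the loop moves on.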
def backwards(self, orm):
"Write your backwards methods here."
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'object_name': 'ApiApplication'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'8c2727381d5f41eda719ebab3ab8bd920ecb60c63bd1417e892dba232b472b65'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'12f0b0da1ec14b8588328a66b0bf5458327473702e7c400a8865d1c78b4233d8'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Famous Goose'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'object_name': 'ApiGrant'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'315c19be5328426e81e059c18e6064b1'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 1, 4, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 2, 3, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'4db40bfa5d9b417ab64c308c09e866473e3e1cfa6e3c4853a687c278992b2ac6'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'478a2cc485184b599ed1f05fd17eb33483d617d9d3234e8dbccc2f3f3767ba19'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 1, 11, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.deletedorganization': {
'Meta': {'object_name': 'DeletedOrganization'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedproject': {
'Meta': {'object_name': 'DeletedProject'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'team_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'team_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'team_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedteam': {
'Meta': {'object_name': 'DeletedTeam'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deploy': {
'Meta': {'object_name': 'Deploy'},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notified': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.distribution': {
'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.dsymapp': {
'Meta': {'unique_together': "(('project', 'platform', 'app_id'),)", 'object_name': 'DSymApp'},
'app_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'sync_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'})
},
'sentry.email': {
'Meta': {'object_name': 'Email'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('sentry.db.models.fields.citext.CIEmailField', [], {'unique': 'True', 'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'), ('organization_id', 'name'))", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('group_id', 'key_id', 'value_id'),)"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project_id', 'ident'), ('project_id', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project_id', 'email'), ('project_id', 'username'), ('project_id', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.featureadoption': {
'Meta': {'unique_together': "(('organization', 'feature_id'),)", 'object_name': 'FeatureAdoption'},
'applicable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'group_tombstone_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'state': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouplink': {
'Meta': {'unique_together': "(('group_id', 'linked_type', 'linked_id'),)", 'object_name': 'GroupLink'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'linked_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'linked_type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'relationship': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '2'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupshare': {
'Meta': {'object_name': 'GroupShare'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'7e400a9fb6d34efcaa03ae0413f6e305'", 'unique': 'True', 'max_length': '32'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'state': ('jsonfield.fields.JSONField', [], {'null': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'user_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project_id', 'group_id', 'key'),)", 'object_name': 'GroupTagKey'},
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group_id', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptombstone': {
'Meta': {'object_name': 'GroupTombstone'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'unique': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.identity': {
'Meta': {'unique_together': "(('idp', 'external_id'),)", 'object_name': 'Identity'},
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'idp': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.IdentityProvider']"}),
'scopes': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.identityprovider': {
'Meta': {'unique_together': "(('type', 'instance'),)", 'object_name': 'IdentityProvider'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.integration': {
'Meta': {'unique_together': "(('provider', 'external_id'),)", 'object_name': 'Integration'},
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'metadata': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationIntegration']", 'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectIntegration']", 'to': "orm['sentry.Project']"}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'object_name': 'OrganizationAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationintegration': {
'Meta': {'unique_together': "(('organization', 'integration'),)", 'object_name': 'OrganizationIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'default_auth_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectTeam']", 'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectintegration': {
'Meta': {'unique_together': "(('project', 'integration'),)", 'object_name': 'ProjectIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'rate_limit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'rate_limit_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.projectsymcachefile': {
'Meta': {'unique_together': "(('project', 'dsym_file'),)", 'object_name': 'ProjectSymCacheFile'},
'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.projectteam': {
'Meta': {'unique_together': "(('project', 'team'),)", 'object_name': 'ProjectTeam'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release'},
'authors': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'commit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'total_deploys': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('organization_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseheadcommit': {
'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.scheduleddeletion': {
'Meta': {'unique_together': "(('app_label', 'model_name', 'object_id'),)", 'object_name': 'ScheduledDeletion'},
'aborted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 2, 3, 0, 0)'}),
'guid': ('django.db.models.fields.CharField', [], {'default': "'b09bef3bd4b045adb0fdfcaf32356ea0'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.scheduledjob': {
'Meta': {'object_name': 'ScheduledJob'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'payload': ('jsonfield.fields.JSONField', [], {'default': '{}'})
},
'sentry.servicehook': {
'Meta': {'object_name': 'ServiceHook'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'b83bafe9565f4662b6237e9c27c845701151e4ee47a7420483d8f1833901de75'"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project_id', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project_id', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'", 'index_together': "(('project_id', 'key', 'last_seen'),)"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'Pl4IxIjX5tCaiJsjYssR73mcbbUtjHfx'", 'max_length': '32'})
},
'sentry.useridentity': {
'Meta': {'unique_together': "(('user', 'identity'),)", 'object_name': 'UserIdentity'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'identity': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Identity']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.userip': {
'Meta': {'unique_together': "(('user', 'ip_address'),)", 'object_name': 'UserIP'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_user_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.versiondsymfile': {
'Meta': {'unique_together': "(('dsym_file', 'version', 'build'),)", 'object_name': 'VersionDSymFile'},
'build': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dsym_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymApp']"}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
complete_apps = ['sentry']
symmetrical = True
| {
"content_hash": "af6c4d05bf78f220d5e8509fa4472f7f",
"timestamp": "",
"source": "github",
"line_count": 1055,
"max_line_length": 233,
"avg_line_length": 90.6388625592417,
"alnum_prop": 0.5784531080063582,
"repo_name": "looker/sentry",
"id": "a469cb2d5f490886f12242ddb233b699c795c468",
"size": "95648",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/south_migrations/0373_backfill_projectteam.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "289931"
},
{
"name": "HTML",
"bytes": "241322"
},
{
"name": "JavaScript",
"bytes": "3112298"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "7048"
},
{
"name": "Python",
"bytes": "36341504"
},
{
"name": "Ruby",
"bytes": "204"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
} |
import logging
from mmcv.utils import get_logger


def get_root_logger(log_file=None, log_level=logging.INFO):
    """Get the root logger named 'teter' through mmcv's ``get_logger``;
    a file handler is attached when ``log_file`` is given."""
    return get_logger("teter", log_file, log_level)
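
# Minimal usage sketch (added illustration, not in the original file):
#
#     logger = get_root_logger(log_file='work_dirs/run.log')
#     logger.info('training started')
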
| {
"content_hash": "606cdba13ab63ea71a37461b632796a0",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 59,
"avg_line_length": 27.166666666666668,
"alnum_prop": 0.7484662576687117,
"repo_name": "SysCV/tet",
"id": "cd9407ca8414bc9c5b2c3a0de6d3609f36098989",
"size": "163",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "teter/utils/logger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "379313"
},
{
"name": "Shell",
"bytes": "1714"
}
],
"symlink_target": ""
} |
raise NotImplementedError("urllib2 is not yet implemented in Skulpt")
| {
"content_hash": "32e11439855e14375339fb5a4117abce",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 69,
"avg_line_length": 70,
"alnum_prop": 0.8285714285714286,
"repo_name": "ArcherSys/ArcherSys",
"id": "b066b301407b362d39723225eeb4658ca150f3f1",
"size": "70",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skulpt/src/lib/urllib2.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import pkg_resources

from nova.api.openstack.compute import extensions as computeextensions
from nova.api.openstack import extensions
from nova.openstack.common.plugin import plugin
from nova import test


class StubController(object):

    def i_am_the_stub(self):
        pass


class StubControllerExtension(extensions.ExtensionDescriptor):
    """This is a docstring. We need it."""
    name = 'stubextension'
    alias = 'stubby'

    def get_resources(self):
        resources = []
        res = extensions.ResourceExtension('testme',
                                           StubController())
        resources.append(res)
        return resources


service_list = []


class TestPluginClass(plugin.Plugin):

    def __init__(self, service_name):
        super(TestPluginClass, self).__init__(service_name)
        self._add_api_extension_descriptor(StubControllerExtension)
        service_list.append(service_name)


class MockEntrypoint(pkg_resources.EntryPoint):

    def load(self):
        return TestPluginClass


class APITestCase(test.TestCase):
    """Test case for the plugin api extension interface."""

    def test_add_extension(self):

        def mock_load(_s):
            return TestPluginClass()

        def mock_iter_entry_points(_t):
            return [MockEntrypoint("fake", "fake", ["fake"])]

        self.stubs.Set(pkg_resources, 'iter_entry_points',
                       mock_iter_entry_points)

        global service_list
        service_list = []

        # Marking out the default extension paths makes this test MUCH faster.
        self.flags(osapi_compute_extension=[])

        found = False
        mgr = computeextensions.ExtensionManager()
        for res in mgr.get_resources():
            # We have to use this weird 'dir' check because
            # the plugin framework muddies up the classname
            # such that 'isinstance' doesn't work right.
            if 'i_am_the_stub' in dir(res.controller):
                found = True

        self.assertTrue(found)
        self.assertEqual(len(service_list), 1)
        self.assertEqual(service_list[0], 'compute-extensions')
| {
"content_hash": "401c92522e1893f1665281a73915e4e1",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 78,
"avg_line_length": 29.541666666666668,
"alnum_prop": 0.6389280677009873,
"repo_name": "maheshp/novatest",
"id": "3aac638c685c08356c2159459ad580599874c7a5",
"size": "2763",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "nova/tests/test_plugin_api_extensions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "8947329"
},
{
"name": "Shell",
"bytes": "17067"
}
],
"symlink_target": ""
} |
__all__ = ['entropy_vn', 'entropy_linear', 'entropy_mutual', 'negativity',
           'concurrence', 'entropy_conditional', 'entangling_power',
           'entropy_relative']

from numpy import conj, e, inf, imag, inner, real, sort, sqrt
from numpy.lib.scimath import log, log2

from qutip.qobj import ptrace
from qutip.states import ket2dm
from qutip.tensor import tensor
from qutip.operators import sigmay
from qutip.sparse import sp_eigs
from qutip.partial_transpose import partial_transpose


def entropy_vn(rho, base=e, sparse=False):
    """
    Von-Neumann entropy of density matrix

    Parameters
    ----------
    rho : qobj
        Density matrix.
    base : {e,2}
        Base of logarithm.
    sparse : {False,True}
        Use sparse eigensolver.

    Returns
    -------
    entropy : float
        Von-Neumann entropy of `rho`.

    Examples
    --------
    >>> rho=0.5*fock_dm(2,0)+0.5*fock_dm(2,1)
    >>> entropy_vn(rho,2)
    1.0

    """
    if rho.type == 'ket' or rho.type == 'bra':
        rho = ket2dm(rho)
    vals = sp_eigs(rho.data, rho.isherm, vecs=False, sparse=sparse)
    nzvals = vals[vals != 0]
    if base == 2:
        logvals = log2(nzvals)
    elif base == e:
        logvals = log(nzvals)
    else:
        raise ValueError("Base must be 2 or e.")
    return float(real(-sum(nzvals * logvals)))
def entropy_linear(rho):
"""
Linear entropy of a density matrix.
Parameters
----------
rho : qobj
        Density matrix or ket/bra vector.
Returns
-------
entropy : float
Linear entropy of rho.
Examples
--------
>>> rho=0.5*fock_dm(2,0)+0.5*fock_dm(2,1)
>>> entropy_linear(rho)
0.5
"""
if rho.type == 'ket' or rho.type == 'bra':
rho = ket2dm(rho)
return float(real(1.0 - (rho ** 2).tr()))
def concurrence(rho):
"""
Calculate the concurrence entanglement measure for a two-qubit state.
Parameters
----------
    rho : qobj
Ket, bra, or density matrix for a two-qubit state.
Returns
-------
concur : float
Concurrence
References
----------
.. [1] https://en.wikipedia.org/wiki/Concurrence_(quantum_computing)
"""
if rho.isket and rho.dims != [[2, 2], [1, 1]]:
raise Exception("Ket must be tensor product of two qubits.")
elif rho.isbra and rho.dims != [[1, 1], [2, 2]]:
raise Exception("Bra must be tensor product of two qubits.")
elif rho.isoper and rho.dims != [[2, 2], [2, 2]]:
raise Exception("Density matrix must be tensor product of two qubits.")
if rho.isket or rho.isbra:
rho = ket2dm(rho)
sysy = tensor(sigmay(), sigmay())
rho_tilde = (rho * sysy) * (rho.conj() * sysy)
evals = rho_tilde.eigenenergies()
# abs to avoid problems with sqrt for very small negative numbers
evals = abs(sort(real(evals)))
lsum = sqrt(evals[3]) - sqrt(evals[2]) - sqrt(evals[1]) - sqrt(evals[0])
return max(0, lsum)
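# Hedged usage sketch (illustration only, not part of the original module):
# for a maximally entangled Bell state the concurrence should be 1.  The
# ``basis`` import is assumed to exist in the public qutip API.
def _concurrence_example():
    from qutip.states import basis
    bell = (tensor(basis(2, 0), basis(2, 0)) +
            tensor(basis(2, 1), basis(2, 1))).unit()
    return concurrence(bell)  # expected: 1.0 up to numerical precision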
def negativity(rho, subsys, method='tracenorm', logarithmic=False):
"""
Compute the negativity for a multipartite quantum system described
by the density matrix rho. The subsys argument is an index that
indicates which system to compute the negativity for.
.. note::
Experimental.
"""
mask = [idx == subsys for idx, n in enumerate(rho.dims[0])]
rho_pt = partial_transpose(rho, mask)
if method == 'tracenorm':
N = ((rho_pt.dag() * rho_pt).sqrtm().tr().real - 1)/2.0
elif method == 'eigenvalues':
l = rho_pt.eigenenergies()
N = ((abs(l)-l)/2).sum()
else:
raise ValueError("Unknown method %s" % method)
if logarithmic:
return log2(2 * N + 1)
else:
return N
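# Hedged sketch (illustration only): one half of a two-qubit Bell pair has
# negativity 1/2 (logarithmic negativity 1).  ``basis`` is assumed from the
# public qutip API.
def _negativity_example():
    from qutip.states import basis
    bell = (tensor(basis(2, 0), basis(2, 0)) +
            tensor(basis(2, 1), basis(2, 1))).unit()
    return negativity(ket2dm(bell), 0)  # expected: 0.5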
def entropy_mutual(rho, selA, selB, base=e, sparse=False):
"""
Calculates the mutual information S(A:B) between selection
components of a system density matrix.
Parameters
----------
rho : qobj
Density matrix for composite quantum systems
selA : int/list
`int` or `list` of first selected density matrix components.
selB : int/list
`int` or `list` of second selected density matrix components.
base : {e,2}
Base of logarithm.
sparse : {False,True}
Use sparse eigensolver.
Returns
-------
ent_mut : float
Mutual information between selected components.
"""
if isinstance(selA, int):
selA = [selA]
if isinstance(selB, int):
selB = [selB]
if rho.type != 'oper':
raise TypeError("Input must be a density matrix.")
if (len(selA) + len(selB)) != len(rho.dims[0]):
raise TypeError("Number of selected components must match " +
"total number.")
rhoA = ptrace(rho, selA)
rhoB = ptrace(rho, selB)
out = (entropy_vn(rhoA, base, sparse=sparse) +
entropy_vn(rhoB, base, sparse=sparse) -
entropy_vn(rho, base, sparse=sparse))
return out
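# Hedged sketch (illustration only): the mutual information between the two
# qubits of a pure Bell pair is 2 bits in base 2, while a product state
# gives 0.  ``basis`` is assumed from the public qutip API.
def _entropy_mutual_example():
    from qutip.states import basis
    bell = (tensor(basis(2, 0), basis(2, 0)) +
            tensor(basis(2, 1), basis(2, 1))).unit()
    return entropy_mutual(ket2dm(bell), 0, 1, base=2)  # expected: 2.0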
def entropy_relative(rho, sigma, base=e, sparse=False, tol=1e-12):
"""
Calculates the relative entropy S(rho||sigma) between two density
matrices.
Parameters
----------
rho : :class:`qutip.Qobj`
First density matrix (or ket which will be converted to a density
matrix).
sigma : :class:`qutip.Qobj`
Second density matrix (or ket which will be converted to a density
matrix).
base : {e,2}
Base of logarithm. Defaults to e.
sparse : bool
Flag to use sparse solver when determining the eigenvectors
of the density matrices. Defaults to False.
tol : float
        Tolerance used to detect zero eigenvalues or zero dot products
        between eigenvectors. Defaults to 1e-12.
Returns
-------
rel_ent : float
        Value of relative entropy. Guaranteed to be non-negative, and equal
        to zero only when rho and sigma are identical.
Examples
--------
First we define two density matrices:
>>> rho = qutip.ket2dm(qutip.ket("00"))
>>> sigma = rho + qutip.ket2dm(qutip.ket("01"))
>>> sigma = sigma.unit()
Then we calculate their relative entropy using base 2 (i.e. ``log2``)
and base e (i.e. ``log``).
>>> qutip.entropy_relative(rho, sigma, base=2)
1.0
>>> qutip.entropy_relative(rho, sigma)
0.6931471805599453
References
----------
See Nielsen & Chuang, "Quantum Computation and Quantum Information",
Section 11.3.1, pg. 511 for a detailed explanation of quantum relative
entropy.
"""
if rho.isket:
rho = ket2dm(rho)
if sigma.isket:
sigma = ket2dm(sigma)
if not rho.isoper or not sigma.isoper:
raise TypeError("Inputs must be density matrices.")
if rho.dims != sigma.dims:
raise ValueError("Inputs must have the same shape and dims.")
if base == 2:
log_base = log2
elif base == e:
log_base = log
else:
raise ValueError("Base must be 2 or e.")
    # S(rho || sigma) = sum_i(p_i log p_i) - sum_ij(p_i P_ij log q_j)
#
# S is +inf if the kernel of sigma (i.e. svecs[svals == 0]) has non-trivial
# intersection with the support of rho (i.e. rvecs[rvals != 0]).
rvals, rvecs = sp_eigs(rho.data, rho.isherm, vecs=True, sparse=sparse)
if any(abs(imag(rvals)) >= tol):
raise ValueError("Input rho has non-real eigenvalues.")
rvals = real(rvals)
svals, svecs = sp_eigs(sigma.data, sigma.isherm, vecs=True, sparse=sparse)
if any(abs(imag(svals)) >= tol):
raise ValueError("Input sigma has non-real eigenvalues.")
svals = real(svals)
# Calculate inner products of eigenvectors and return +inf if kernel
# of sigma overlaps with support of rho.
P = abs(inner(rvecs, conj(svecs))) ** 2
if (rvals >= tol) @ (P >= tol) @ (svals < tol):
return inf
# Avoid -inf from log(0) -- these terms will be multiplied by zero later
# anyway
svals[abs(svals) < tol] = 1
nzrvals = rvals[abs(rvals) >= tol]
# Calculate S
S = nzrvals @ log_base(nzrvals) - rvals @ P @ log_base(svals)
# the relative entropy is guaranteed to be >= 0, so we clamp the
# calculated value to 0 to avoid small violations of the lower bound.
return max(0, S)
def entropy_conditional(rho, selB, base=e, sparse=False):
"""
Calculates the conditional entropy :math:`S(A|B)=S(A,B)-S(B)`
of a selected density matrix component.
Parameters
----------
rho : qobj
Density matrix of composite object
selB : int/list
Selected components for density matrix B
base : {e,2}
Base of logarithm.
sparse : {False,True}
Use sparse eigensolver.
Returns
-------
ent_cond : float
Value of conditional entropy
"""
if rho.type != 'oper':
raise TypeError("Input must be density matrix.")
if isinstance(selB, int):
selB = [selB]
B = ptrace(rho, selB)
out = (entropy_vn(rho, base, sparse=sparse) -
entropy_vn(B, base, sparse=sparse))
return out
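# Hedged sketch (illustration only): for a pure Bell pair the conditional
# entropy S(A|B) = S(A,B) - S(B) is -1 bit in base 2; negative conditional
# entropy is a signature of entanglement.  ``basis`` is assumed from the
# public qutip API.
def _entropy_conditional_example():
    from qutip.states import basis
    bell = (tensor(basis(2, 0), basis(2, 0)) +
            tensor(basis(2, 1), basis(2, 1))).unit()
    return entropy_conditional(ket2dm(bell), 1, base=2)  # expected: -1.0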
def participation_ratio(rho):
"""
Returns the effective number of states for a density matrix.
    The participation ratio is unity for pure states and reaches its
    maximum value N, where N is the Hilbert space dimensionality, for
    completely mixed states.
Parameters
----------
rho : qobj
Density matrix
Returns
-------
pr : float
Effective number of states in the density matrix
"""
if rho.type == 'ket' or rho.type == 'bra':
return 1.0
else:
return 1.0 / (rho ** 2).tr()
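# Hedged sketch (illustration only): the maximally mixed qubit state has
# participation ratio 2 (its Hilbert space dimension); any pure state would
# give 1.  ``qeye`` is assumed from the public qutip API.
def _participation_ratio_example():
    from qutip.operators import qeye
    return participation_ratio(qeye(2) / 2)  # expected: 2.0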
def entangling_power(U):
"""
    Calculate the entangling power of a two-qubit gate U, which
    is zero for non-entangling gates and 2/9 for maximally
    entangling gates.
Parameters
----------
U : qobj
Qobj instance representing a two-qubit gate.
Returns
-------
ep : float
        The entangling power of U (a real number between 0 and 2/9).
    References
    ----------
    Explorations in Quantum Computing, Colin P. Williams (Springer, 2011)
"""
if not U.isoper:
raise Exception("U must be an operator.")
if U.dims != [[2, 2], [2, 2]]:
raise Exception("U must be a two-qubit gate.")
from qutip.qip.operations.gates import swap
a = (tensor(U, U).dag() * swap(N=4, targets=[1, 3]) *
tensor(U, U) * swap(N=4, targets=[1, 3]))
b = (tensor(swap() * U, swap() * U).dag() * swap(N=4, targets=[1, 3]) *
tensor(swap() * U, swap() * U) * swap(N=4, targets=[1, 3]))
return 5.0/9 - 1.0/36 * (a.tr() + b.tr()).real
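# Hedged sketch (illustration only): CNOT is maximally entangling, so its
# entangling power should come out as 2/9.  The ``cnot`` import is assumed
# to live in the same gates module that provides ``swap`` above.
def _entangling_power_example():
    from qutip.qip.operations.gates import cnot
    return entangling_power(cnot())  # expected: 2/9 ~ 0.2222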
| {
"content_hash": "3c20bf54fcedcb1f8a8d73047d018a13",
"timestamp": "",
"source": "github",
"line_count": 378,
"max_line_length": 79,
"avg_line_length": 28.132275132275133,
"alnum_prop": 0.596482979123566,
"repo_name": "qutip/qutip",
"id": "2bf068db67e059bf670f35ebaee19bf865737cd9",
"size": "10634",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qutip/entropy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "13979"
},
{
"name": "Cython",
"bytes": "354994"
},
{
"name": "OpenQASM",
"bytes": "1718"
},
{
"name": "Python",
"bytes": "2810040"
}
],
"symlink_target": ""
} |
from datetime import datetime
from time import sleep
from django.test import TestCase
from django.http import Http404
from django.db import models
from documents.models import Document, DocumentPartF, DocumentPartB
from documents.retrospection import now, set_now
from documents.fields import DocumentForeignKey
# models for doc-test of modified example from django tutorial
class Choice(Document):
choice = models.CharField(max_length=200)
def __unicode__(self):
return self.choice
class Poll(Document):
question = models.CharField(max_length=200)
def __unicode__(self):
return self.question
class PollChoices(DocumentPartB):
poll = models.ForeignKey(Poll)
choice = models.ForeignKey(Choice)
@classmethod
def to_master(cls):
return 'poll'
class PollResults(Document):
poll = models.ForeignKey(Poll)
choice = models.ForeignKey(Choice)
votes = models.IntegerField()
@staticmethod
def vote(poll_document_id, choice_document_id):
n = datetime.now()
p = Poll.document_get(n, document_id=poll_document_id)
c = Choice.document_get(n, document_id=choice_document_id)
try:
v = PollResults.document_get(
n, poll__document_id=poll_document_id,
choice__document_id=choice_document_id)
v.votes += 1
except PollResults.DoesNotExist:
v = PollResults(poll=p, choice=c, votes=1)
v.document_save()
return v.document_id
polltest = """
# No polls yet
>>> Poll.objects.all()
[]
# Create a new one
>>> p = Poll(question="Who is who?")
# Save it.
>>> p.document_save()
# Now id, document_id, document_start and document_end are set.
>>> p.id
1
>>> p.document_id
1
# Access database columns via Python attributes.
>>> print p.question
Who is who?
>>> p.document_start # doctest: +ELLIPSIS
datetime.datetime(...)
# Give the Poll a couple of Choices.
>>> now = datetime.now()
>>> p = Poll.document_get(now,document_id=1)
# Display any choices from the related object set -- none so far.
>>> PollChoices.at(now)
[]
# Create three choices.
>>> c1 = Choice(choice='President') ; c1.document_save() ; c1
<Choice: President>
>>> c2 = Choice(choice='Agent') ; c2.document_save() ; c2
<Choice: Agent>
>>> c3 = Choice(choice='Gena Crocodile') ; c3.document_save() ; c3
<Choice: Gena Crocodile>
# document_id is assigned automatically:
>>> for c in(c1,c2,c3) : print c.document_id
1
2
3
# Add them:
>>> p.document_save() # new version
>>> p.pollchoices_set.add( *[ PollChoices(choice=c) for c in(c1,c2,c3) ] )
# Voting:
>>> PollResults.vote( 1 , 1 )
1
>>> PollResults.vote( 1 , 1 )
1
>>> PollResults.vote( 1 , 1 )
1
>>> PollResults.vote( 1 , 2 )
4
# Remember a moment in between (t):
>>> from time import sleep
>>> sleep( 0.1 )
>>> t = datetime.now()
>>> sleep( 0.1 )
>>> PollResults.vote( 1 , 2 )
4
>>> PollResults.vote( 1 , 3 )
6
# Voting results:
>>> for r in PollResults.at( datetime.now() , poll__document_id=1 ) :
... print r.votes , r.choice.choice
3 President
2 Agent
1 Gena Crocodile
# Retrospection (state at moment t):
>>> for r in PollResults.at( t , poll__document_id=1 ) :
... print r.votes , r.choice.choice
3 President
1 Agent
# Cleanup:
>>> for m in( Choice , Poll , PollChoices , PollResults ) :
... m.objects.all().delete()
"""
# another modification of the same example (with a link to specific version)
class Poll2(Document):
question = models.CharField(max_length=200)
def __unicode__(self):
return self.question
class Choice2(Document):
poll = models.ForeignKey(Poll2)
choice = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __unicode__(self):
return self.choice
polltest2 = """
>>> Poll, Choice = Poll2, Choice2
# No polls yet
>>> Poll.objects.all()
[]
# And none at the current moment either
>>> Poll.now.all()
[]
>>> p = Poll(question='who is who?')
>>> p.document_save()
>>> p.id
1
>>> p.document_id
1
>>> print p.question
who is who?
>>> p.document_start # doctest: +ELLIPSIS
datetime.datetime(...)
# without advancing in time, "now" still shows the old state
>>> Poll.now.count()
0
>>> Poll.objects.count()
1
# advance forward in time
>>> set_now()
>>> Poll.now.count()
1
>>> p = Poll.now.get(document_id=1)
>>> print p
who is who?
# edit the document
>>> set_now()
>>> p.question = 'Who is who here?'
>>> p.document_save()
>>> set_now()
>>> p = Poll.now.get(document_id=p.document_id)
>>> print p
Who is who here?
>>> Poll.objects.count()
2
>>> Poll.now.count()
1
# delete the document
>>> set_now()
>>> p.document_delete()
1
>>> set_now()
>>> Poll.now.count()
0
>>> Poll.objects.count()
2
# create a new question
>>> p = Poll(question='who is who?')
>>> p.document_save()
>>> p.id
3
>>> p.document_id
3
# add answer choices
>>> Choice.now.count()
0
>>> Choice(choice='President', poll=p).document_save()
>>> Choice(choice='Agent', poll=p).document_save()
>>> Choice(choice='Gena Crocodile', poll=p).document_save()
>>> set_now()
>>> p.choice2_set.count()
3
>>> p.question = 'who is who? (take 2)'
>>> p.document_save()
>>> set_now()
>>> p.choice2_set.count()
0
# this is not what we want!
#>>> import pdb; pdb.set_trace()
"""
# another modification of the same example (with a link to current version)
class Poll3(Document):
question = models.CharField(max_length=200)
def __unicode__(self):
return self.question
class Choice3(Document):
    poll = DocumentForeignKey(Poll3)
choice = models.CharField(max_length=200)
votes = models.IntegerField()
def __unicode__(self):
return self.choice
# models for unit-tests
class SimpleDocument(Document):
data = models.IntegerField()
def __unicode__(self):
return '%s(%s,%s,%s,%s)' % (
self.data, self.document_id, self.id,
self.document_start, self.document_end)
class SimpleDocumentChild(SimpleDocument):
cdata = models.IntegerField()
def __unicode__(self):
return unicode(self.cdata) + ' - ' + \
super(SimpleDocumentChild, self).__unicode__()
class FPart(DocumentPartF):
partdata = models.IntegerField()
def __unicode__(self):
return '%s,%s' % (self.partdata, self.id)
class DocumentF(Document):
data = models.IntegerField()
link = models.OneToOneField(FPart)
def __unicode__(self):
return '%s,%s(%s,%s,%s,%s)' % (
self.data, self.link_id, self.document_id, self.id,
self.document_start, self.document_end)
class FFPart0(DocumentPartF):
partdata = models.IntegerField()
def __unicode__(self):
return '%s,%s' % (self.partdata, self.id)
class FFPart(DocumentPartF):
partlink = models.OneToOneField(FFPart0)
    def __unicode__(self):
        return '%s,%s' % (self.partlink_id, self.id)
class DocumentFF(Document):
data = models.IntegerField()
link = models.OneToOneField(FFPart)
def __unicode__(self):
return '%s,%s(%s,%s,%s,%s)' % (
self.data, self.link_id, self.document_id, self.id,
self.document_start, self.document_end)
class DocumentB(Document):
data = models.IntegerField()
def __unicode__(self):
return '%s(%s,%s,%s,%s)' % (
self.data, self.document_id, self.id,
self.document_start, self.document_end)
class BPart(DocumentPartB):
partdata = models.IntegerField()
link = models.ForeignKey(DocumentB)
def __unicode__(self):
return '%s,%s(%s)' % (self.partdata, self.link_id, self.id)
class FBPart(DocumentPartF):
@classmethod
def to_master(cls):
return 'documentfb'
class FBPart0(DocumentPartB):
partlink = models.OneToOneField(FBPart)
partdata = models.IntegerField()
class DocumentFB(Document):
data = models.IntegerField()
link = models.OneToOneField(FBPart)
class DocumentFKSourceString(Document):
link = DocumentForeignKey('DocumentFKDestination')
class DocumentFKDestination(Document):
data = models.IntegerField()
class DocumentFKSource(Document):
link = DocumentForeignKey(DocumentFKDestination)
__test__ = {
'polltest': polltest,
'polltest2': polltest2,
#'polltest3': polltest3,
}
class SimpleDocumentBulkTest(TestCase):
def tearDown(self):
SimpleDocument.objects.all().delete()
def test_bulk_delete(self):
d1 = SimpleDocument(data=1)
d2 = SimpleDocument(data=2)
d3 = SimpleDocument(data=3)
SimpleDocument.bulk_documents_save([d1, d2, d3])
for data in range(1, 4):
d = SimpleDocument.objects.get(data=data)
self.assertEqual(d.data, data)
self.assertEqual(d.document_id, d.id)
self.assertEqual(d.document_end, datetime.max)
self.assertTrue(d.document_start <= datetime.now())
SimpleDocument.bulk_documents_delete([d1, d2, d3])
for data in range(1, 4):
self.assertRaises(SimpleDocument.DoesNotExist,
SimpleDocument.now.get, data=data)
def test_document_save_many(self):
d1 = SimpleDocument(data=1)
d2 = SimpleDocument(data=2)
d3 = SimpleDocument(data=3)
SimpleDocument.bulk_documents_save([d1, d2, d3])
for data in range(1, 4):
d = SimpleDocument.objects.get(data=data)
self.assertEqual(d.data, data)
self.assertEqual(d.document_id, d.id)
self.assertEqual(d.document_end, datetime.max)
self.assertTrue(d.document_start <= datetime.now())
def test_document_save(self):
d = SimpleDocument(data=1)
SimpleDocument.bulk_documents_save([d])
d = SimpleDocument.objects.get(data=1)
self.assertEqual(d.data, 1)
self.assertEqual(d.document_id, d.id)
self.assertEqual(d.document_end, datetime.max)
self.assertTrue(d.document_start <= datetime.now())
def test_document_save_1(self):
d = SimpleDocument(data=1, document_id=123, id=17)
self.assertRaises(SimpleDocument.ChangedAlready,
SimpleDocument.bulk_documents_save, [d])
def test_document_save_2(self):
d = SimpleDocument(data=1, document_id=123)
SimpleDocument.bulk_documents_save([d])
t = datetime.now()
d.data = 2
SimpleDocument.bulk_documents_save([d])
self.assertEqual(
SimpleDocument.objects.filter(document_id=123).count(), 2)
d = SimpleDocument.objects.order_by('-id')[0]
self.assertEqual(d.data, 2)
self.assertEqual(d.document_id, 123)
self.assertEqual(d.document_end, datetime.max)
self.assertTrue(d.document_start <= datetime.now())
d = SimpleDocument.objects.order_by('id')[0]
self.assertEqual(d.data, 1)
self.assertEqual(d.document_id, 123)
self.assertTrue(d.document_end <= datetime.now())
self.assertTrue(d.document_start <= t)
def test_document_save_3(self):
d = SimpleDocument(data=1, document_id=123)
SimpleDocument.bulk_documents_save([d])
t = datetime.now()
d.data = 2
d.id = None
SimpleDocument.bulk_documents_save([d])
self.assertEqual(
SimpleDocument.objects.filter(document_id=123).count(), 2)
d = SimpleDocument.objects.order_by('-id')[0]
self.assertEqual(d.data, 2)
self.assertEqual(d.document_id, 123)
self.assertEqual(d.document_end, datetime.max)
self.assertTrue(d.document_start <= datetime.now())
d = SimpleDocument.objects.order_by('id')[0]
self.assertEqual(d.data, 1)
self.assertEqual(d.document_id, 123)
self.assertTrue(d.document_end <= datetime.now())
self.assertTrue(d.document_start <= t)
def test_document_delete(self):
d = SimpleDocument(data=1, document_id=123)
SimpleDocument.bulk_documents_save([d])
t = datetime.now()
d.document_delete()
self.assertEqual(
SimpleDocument.objects.filter(document_id=123).count(), 1)
d = SimpleDocument.objects.get()
self.assertEqual(d.data, 1)
self.assertEqual(d.document_id, 123)
self.assertTrue(d.document_end <= datetime.now())
self.assertTrue(d.document_start <= t)
def test_document_get_or_404(self):
d = SimpleDocument(data=1, document_id=123)
SimpleDocument.bulk_documents_save([d])
d = SimpleDocument.document_get_or_404(
datetime.now(), document_id=123)
self.assertEqual(d.data, 1)
def test_document_get_or_404_1(self):
d = SimpleDocument(data=1, document_id=123)
SimpleDocument.bulk_documents_save([d])
self.assertRaises(Http404, SimpleDocument.document_get_or_404,
datetime.now(), document_id=12)
def test_document_get(self):
d = SimpleDocument(data=1, document_id=123)
SimpleDocument.bulk_documents_save([d])
d = SimpleDocument.document_get(datetime.now(), document_id=123)
self.assertEqual(d.data, 1)
def test_document_get_1(self):
d = SimpleDocument(data=1, document_id=123)
SimpleDocument.bulk_documents_save([d])
self.assertRaises(SimpleDocument.DoesNotExist,
SimpleDocument.document_get,
datetime.now(), document_id=12)
def test_at(self):
d = SimpleDocument(data=1, document_id=123)
SimpleDocument.bulk_documents_save([d])
id1 = d.id
t = datetime.now()
sleep(0.001)
d.data = 2
SimpleDocument.bulk_documents_save([d])
id2 = d.id
self.assertEqual(SimpleDocument.at(t).get().id, id1)
self.assertEqual(SimpleDocument.at(datetime.now()).get().id, id2)
def test_history(self):
d = SimpleDocument(data=1, document_id=123)
SimpleDocument.bulk_documents_save([d])
t = datetime.now()
d.data = 2
SimpleDocument.bulk_documents_save([d])
self.assertEqual(d.history().count(), 2)
self.assertEqual(d.history()[0].data, 2)
self.assertEqual(d.history()[1].data, 1)
def test_document_restore(self):
d = SimpleDocument(data=1, document_id=123)
SimpleDocument.bulk_documents_save([d])
t1 = datetime.now()
sleep(0.001)
d.data = 2
SimpleDocument.bulk_documents_save([d])
t2 = datetime.now()
sleep(0.001)
self.assertEqual(SimpleDocument.at(t2).get(document_id=123).data, 2)
self.assertEqual(SimpleDocument.at(t1).get(document_id=123).data, 1)
SimpleDocument.at(t1).get(document_id=123).document_restore()
self.assertEqual(
SimpleDocument.at(datetime.now()).get(document_id=123).data, 1)
sleep(0.001)
SimpleDocument.at(t2).get(document_id=123).document_restore()
self.assertEqual(
SimpleDocument.at(datetime.now()).get(document_id=123).data, 2)
class SimpleDocumentTest(TestCase):
def tearDown(self):
SimpleDocument.objects.all().delete()
def test_document_save(self):
d = SimpleDocument(data=1)
d.document_save()
d = SimpleDocument.objects.get(data=1)
self.assertEqual(d.data, 1)
self.assertEqual(d.document_id, d.id)
self.assertEqual(d.document_end, datetime.max)
self.assertTrue(d.document_start <= datetime.now())
def test_document_save_1(self):
d = SimpleDocument(data=1, document_id=123, id=17)
self.assertRaises(Document.ChangedAlready, d.document_save)
def test_document_save_2(self):
d = SimpleDocument(data=1, document_id=123)
d.document_save()
t = datetime.now()
d.data = 2
d.document_save()
self.assertEqual(
SimpleDocument.objects.filter(document_id=123).count(), 2)
d = SimpleDocument.objects.order_by('-id')[0]
self.assertEqual(d.data, 2)
self.assertEqual(d.document_id, 123)
self.assertEqual(d.document_end, datetime.max)
self.assertTrue(d.document_start <= datetime.now())
d = SimpleDocument.objects.order_by('id')[0]
self.assertEqual(d.data, 1)
self.assertEqual(d.document_id, 123)
self.assertTrue(d.document_end <= datetime.now())
self.assertTrue(d.document_start <= t)
def test_document_save_3(self):
d = SimpleDocument(data=1, document_id=123)
d.document_save()
t = datetime.now()
d.data = 2
d.id = None
d.document_save()
self.assertEqual(
SimpleDocument.objects.filter(document_id=123).count(), 2)
d = SimpleDocument.objects.order_by('-id')[0]
self.assertEqual(d.data, 2)
self.assertEqual(d.document_id, 123)
self.assertEqual(d.document_end, datetime.max)
self.assertTrue(d.document_start <= datetime.now())
d = SimpleDocument.objects.order_by('id')[0]
self.assertEqual(d.data, 1)
self.assertEqual(d.document_id, 123)
self.assertTrue(d.document_end <= datetime.now())
self.assertTrue(d.document_start <= t)
def test_document_delete(self):
d = SimpleDocument(data=1, document_id=123)
d.document_save()
t = datetime.now()
d.document_delete()
self.assertEqual(
SimpleDocument.objects.filter(document_id=123).count(), 1)
d = SimpleDocument.objects.get()
self.assertEqual(d.data, 1)
self.assertEqual(d.document_id, 123)
self.assertTrue(d.document_end <= datetime.now())
self.assertTrue(d.document_start <= t)
def test_document_get_or_404(self):
d = SimpleDocument(data=1, document_id=123)
d.document_save()
d = SimpleDocument.document_get_or_404(
datetime.now(), document_id=123)
self.assertEqual(d.data, 1)
def test_document_get_or_404_1(self):
d = SimpleDocument(data=1, document_id=123)
d.document_save()
self.assertRaises(Http404, SimpleDocument.document_get_or_404,
datetime.now(), document_id=12)
def test_document_get(self):
d = SimpleDocument(data=1, document_id=123)
d.document_save()
d = SimpleDocument.document_get(datetime.now(), document_id=123)
self.assertEqual(d.data, 1)
def test_document_get_1(self):
d = SimpleDocument(data=1, document_id=123)
d.document_save()
self.assertRaises(SimpleDocument.DoesNotExist,
SimpleDocument.document_get,
datetime.now(), document_id=12)
def test_at(self):
d = SimpleDocument(data=1, document_id=123)
d.document_save()
id1 = d.id
t = datetime.now()
sleep(0.001)
d.data = 2
d.document_save()
id2 = d.id
self.assertEqual(SimpleDocument.at(t).get().id, id1)
self.assertEqual(SimpleDocument.at(datetime.now()).get().id, id2)
def test_history(self):
d = SimpleDocument(data=1, document_id=123)
d.document_save()
t = datetime.now()
d.data = 2
d.document_save()
self.assertEqual(d.history().count(), 2)
self.assertEqual(d.history()[0].data, 2)
self.assertEqual(d.history()[1].data, 1)
def test_document_restore(self):
d = SimpleDocument(data=1, document_id=123)
d.document_save()
t1 = datetime.now()
sleep(0.001)
d.data = 2
d.document_save()
t2 = datetime.now()
sleep(0.001)
self.assertEqual(SimpleDocument.at(t2).get(document_id=123).data, 2)
self.assertEqual(SimpleDocument.at(t1).get(document_id=123).data, 1)
SimpleDocument.at(t1).get(document_id=123).document_restore()
self.assertEqual(
SimpleDocument.at(datetime.now()).get(document_id=123).data, 1)
sleep(0.001)
SimpleDocument.at(t2).get(document_id=123).document_restore()
self.assertEqual(
SimpleDocument.at(datetime.now()).get(document_id=123).data, 2)
class DocumentPartFTest(TestCase):
def tearDown(self):
FPart.objects.all().delete()
DocumentF.objects.all().delete()
def test_history(self):
p1 = FPart(partdata=1)
p1.save()
p2 = FPart(partdata=2)
p2.save()
before = datetime.now()
sleep(0.001)
d = DocumentF(data=1, link=p1)
d.document_save()
sleep(0.001)
inter = datetime.now()
d.link = p2
d.document_save()
sleep(0.001)
after = datetime.now()
self.assertEqual(p1.history().count(), 2)
self.assertEqual(p1.history()[0].partdata, 2)
self.assertEqual(p1.history()[1].partdata, 1)
self.assertEqual(p2.history().count(), 2)
self.assertEqual(p2.history()[0].partdata, 2)
self.assertEqual(p2.history()[1].partdata, 1)
def test_document_f(self):
p1 = FPart(partdata=1)
p1.save()
p2 = FPart(partdata=2)
p2.save()
before = datetime.now()
sleep(0.001)
d = DocumentF(data=1, link=p1)
d.document_save()
sleep(0.001)
inter = datetime.now()
d.link = p2
d.document_save()
sleep(0.001)
after = datetime.now()
self.assertEqual(FPart.at(before).count(), 0)
self.assertEqual(FPart.document_get(inter).partdata, 1)
self.assertEqual(FPart.document_get(after).partdata, 2)
class DocumentPartFFTest(TestCase):
def tearDown(self):
FFPart0.objects.all().delete()
FFPart.objects.all().delete()
DocumentFF.objects.all().delete()
def test_document_ff(self):
p1 = FFPart0(partdata=1)
p1.save()
pp1 = FFPart(partlink=p1)
pp1.save()
d = DocumentFF(data=1, link=pp1)
d.document_save()
after = datetime.now()
self.assertEqual(FFPart0.document_get(after).partdata, 1)
def test_document_ff_1(self):
p1 = FFPart0(partdata=1)
p1.save()
pp1 = FFPart(partlink=p1)
pp1.save()
p2 = FFPart0(partdata=2)
p2.save()
pp2 = FFPart(partlink=p2)
pp2.save()
before = datetime.now()
sleep(0.001)
d = DocumentFF(data=1, link=pp1)
d.document_save()
sleep(0.001)
inter = datetime.now()
d.link = pp2
d.document_save()
sleep(0.001)
after = datetime.now()
self.assertEqual(FFPart0.at(before).count(), 0)
self.assertEqual(FFPart0.document_get(inter).partdata, 1)
self.assertEqual(FFPart0.document_get(after).partdata, 2)
class DocumentPartBTest(TestCase):
def tearDown(self):
BPart.objects.all().delete()
DocumentB.objects.all().delete()
def test_document_b(self):
before = datetime.now()
sleep(0.001)
d = DocumentB(data=1)
d.document_save()
p = BPart(partdata=1)
d.bpart_set.add(p)
sleep(0.001)
inter = datetime.now()
d.data = 2
d.document_save()
p = BPart(partdata=2)
d.bpart_set.add(p)
sleep(0.001)
after = datetime.now()
self.assertEqual(BPart.at(before).count(), 0)
self.assertEqual(BPart.document_get(inter).partdata, 1)
self.assertEqual(BPart.document_get(after).partdata, 2)
class DocumentPartFBTest(TestCase):
def tearDown(self):
FBPart0.objects.all().delete()
FBPart.objects.all().delete()
DocumentFB.objects.all().delete()
def test_document_fb(self):
pp1 = FBPart()
pp1.save()
p1 = FBPart0(partlink=pp1, partdata=1)
p1.save()
pp2 = FBPart()
pp2.save()
p2 = FBPart0(partlink=pp2, partdata=2)
p2.save()
before = datetime.now()
sleep(0.001)
d = DocumentFB(data=1, link=pp1)
d.document_save()
sleep(0.001)
inter = datetime.now()
d.link = pp2
d.document_save()
sleep(0.001)
after = datetime.now()
self.assertEqual(FBPart0.at(before).count(), 0)
self.assertEqual(FBPart0.document_get(inter).partdata, 1)
self.assertEqual(FBPart0.document_get(after).partdata, 2)
class DocumentFK(TestCase):
def tearDown(self):
DocumentFKSource.objects.all().delete()
DocumentFKDestination.objects.all().delete()
def test_document_fk(self):
before = datetime.now()
sleep(0.001)
dd = DocumentFKDestination(data=1)
dd.document_save()
sleep(0.001)
inter = datetime.now()
dd.data = 2
dd.document_save()
sleep(0.001)
after = datetime.now()
ds = DocumentFKSource()
ds.link = dd.document_id
ds.document_save()
ds = DocumentFKSource.objects.get(pk=1)
self.assertRaises(DocumentFKDestination.DoesNotExist,
DocumentFKDestination.at(before).get)
self.assertEqual(DocumentFKDestination.at(inter).get(
document_id=ds.link).data, 1)
self.assertEqual(DocumentFKDestination.at(after).get(
document_id=ds.link).data, 2)
class SimpleDocumentChildTest(TestCase):
def tearDown(self):
SimpleDocument.objects.all().delete()
SimpleDocumentChild.objects.all().delete()
def test_document_save_0(self):
d = SimpleDocumentChild(data=1, cdata=11)
d.document_save()
d = SimpleDocumentChild.objects.get(data=1)
self.assertEqual(d.cdata, 11)
sleep(0.001)
d.cdata = 111
d.document_save()
d = SimpleDocumentChild.document_get(datetime.now(), data=1)
self.assertEqual(d.cdata, 111)
def test_document_save(self):
d = SimpleDocumentChild(data=1000, cdata=1)
d.document_save()
d = SimpleDocumentChild.objects.get(cdata=1)
self.assertEqual(d.cdata, 1)
self.assertEqual(d.document_end, datetime.max)
self.assertTrue(d.document_start <= datetime.now())
def test_document_save_1(self):
d = SimpleDocumentChild(data=1000, cdata=1,
document_id=123, id=17)
self.assertRaises(Document.ChangedAlready, d.document_save)
def test_document_save_2(self):
d = SimpleDocumentChild(data=1000, cdata=1, document_id=123)
d.document_save()
t = datetime.now()
d.cdata = 2
d.document_save()
self.assertEqual(
SimpleDocumentChild.objects.filter(document_id=123).count(), 2)
d = SimpleDocumentChild.objects.order_by('-id')[0]
self.assertEqual(d.cdata, 2)
self.assertEqual(d.document_id, 123)
self.assertEqual(d.document_end, datetime.max)
self.assertTrue(d.document_start <= datetime.now())
d = SimpleDocumentChild.objects.order_by('id')[0]
self.assertEqual(d.cdata, 1)
self.assertEqual(d.document_id, 123)
self.assertTrue(d.document_end <= datetime.now())
self.assertTrue(d.document_start <= t)
def test_document_save_3(self):
d = SimpleDocumentChild(data=1000, cdata=1, document_id=123)
d.document_save()
t = datetime.now()
d.cdata = 2
d.id = None
d.document_save()
self.assertEqual(
SimpleDocumentChild.objects.filter(document_id=123).count(), 2)
d = SimpleDocumentChild.objects.order_by('-id')[0]
self.assertEqual(d.cdata, 2)
self.assertEqual(d.document_id, 123)
self.assertEqual(d.document_end, datetime.max)
self.assertTrue(d.document_start <= datetime.now())
d = SimpleDocumentChild.objects.order_by('id')[0]
self.assertEqual(d.cdata, 1)
self.assertEqual(d.document_id, 123)
self.assertTrue(d.document_end <= datetime.now())
self.assertTrue(d.document_start <= t)
def test_document_delete(self):
d = SimpleDocumentChild(data=1000, cdata=1, document_id=123)
d.document_save()
t = datetime.now()
d.document_delete()
self.assertEqual(
SimpleDocumentChild.objects.filter(document_id=123).count(), 1)
d = SimpleDocumentChild.objects.get()
self.assertEqual(d.cdata, 1)
self.assertEqual(d.document_id, 123)
self.assertTrue(d.document_end <= datetime.now())
self.assertTrue(d.document_start <= t)
def test_document_get_or_404(self):
d = SimpleDocumentChild(data=1000, cdata=1, document_id=123)
d.document_save()
d = SimpleDocumentChild.document_get_or_404(
datetime.now(), document_id=123)
self.assertEqual(d.cdata, 1)
def test_document_get_or_404_1(self):
d = SimpleDocumentChild(data=1000, cdata=1, document_id=123)
d.document_save()
self.assertRaises(Http404, SimpleDocumentChild.document_get_or_404,
datetime.now(), document_id=12)
def test_document_get(self):
d = SimpleDocumentChild(data=1000, cdata=1, document_id=123)
d.document_save()
d = SimpleDocumentChild.document_get(
datetime.now(), document_id=123)
self.assertEqual(d.cdata, 1)
def test_document_get_1(self):
d = SimpleDocumentChild(data=1000, cdata=1, document_id=123)
d.document_save()
self.assertRaises(SimpleDocumentChild.DoesNotExist,
SimpleDocumentChild.document_get,
datetime.now(), document_id=12)
def test_at(self):
d = SimpleDocumentChild(data=1000, cdata=1, document_id=123)
d.document_save()
id1 = d.id
t = datetime.now()
sleep(0.001)
d.cdata = 2
d.document_save()
id2 = d.id
self.assertNotEqual(id1, id2)
self.assertEqual(SimpleDocumentChild.at(t).get().id, id1)
self.assertEqual(
SimpleDocumentChild.at(datetime.now()).get().id, id2)
class TestNow(TestCase):
def test_now(self):
t1 = now()
sleep(0.001)
t2 = now()
self.assertEqual(t1, t2)
set_now()
t2 = now()
self.assertFalse(t1 == t2)
class TestNowManager(TestCase):
def tearDown(self):
SimpleDocument.objects.all().delete()
def test_manager(self):
d = SimpleDocument(data=1, document_id=123)
d.document_save()
id1 = d.id
t = datetime.now()
sleep(0.001)
d.data = 2
d.document_save()
id2 = d.id
set_now()
self.assertEqual(SimpleDocument.now.get().id, id2)
set_now(t)
self.assertEqual(SimpleDocument.now.get().id, id1)
class Test_save_now(TestCase):
def tearDown(self):
SimpleDocument.objects.all().delete()
def test_save_now(self):
d = SimpleDocument(data=1, document_id=123)
d.save_now()
id1 = d.id
sleep(0.001)
d.data = 2
d.save_now()
id2 = d.id
self.assertEqual(SimpleDocument.now.get().id, id2)
| {
"content_hash": "466ee79d2695a1983cd9bd0843d3b1ae",
"timestamp": "",
"source": "github",
"line_count": 1045,
"max_line_length": 79,
"avg_line_length": 29.921531100478468,
"alnum_prop": 0.6117436356658564,
"repo_name": "chtd/doc-versions",
"id": "db909962e896553407177efb6516ae9f1b4fa335",
"size": "31689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "documents/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70371"
}
],
"symlink_target": ""
} |
class UrlNotAvailable(Exception):
pass
class CommandFailed(Exception):
pass
| {
"content_hash": "a13913e440417657e3f7dbc128549758",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 33,
"avg_line_length": 14.333333333333334,
"alnum_prop": 0.7441860465116279,
"repo_name": "nebril/fuel-web",
"id": "186bb0331433268a8c1b0582033ab287c234b4c0",
"size": "697",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "network_checker/url_access_checker/errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "61043"
},
{
"name": "HTML",
"bytes": "7268"
},
{
"name": "JavaScript",
"bytes": "724039"
},
{
"name": "Mako",
"bytes": "1449"
},
{
"name": "Puppet",
"bytes": "282"
},
{
"name": "Python",
"bytes": "3974663"
},
{
"name": "Ruby",
"bytes": "33991"
},
{
"name": "Shell",
"bytes": "28796"
}
],
"symlink_target": ""
} |
"""Upgrader for Python scripts according to an API change specification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import collections
import os
import re
import shutil
import sys
import tempfile
import traceback
import pasta
import six
from six.moves import range
# Some regular expressions we will need for parsing
FIND_OPEN = re.compile(r"^\s*(\[).*$")
FIND_STRING_CHARS = re.compile(r"['\"]")
INFO = "INFO"
WARNING = "WARNING"
ERROR = "ERROR"
ImportRename = collections.namedtuple(
"ImportRename", ["new_name", "excluded_prefixes"])
def full_name_node(name, ctx=ast.Load()):
"""Make an Attribute or Name node for name.
Translate a qualified name into nested Attribute nodes (and a Name node).
Args:
name: The name to translate to a node.
ctx: What context this name is used in. Defaults to Load()
Returns:
A Name or Attribute node.
"""
names = six.ensure_str(name).split(".")
names.reverse()
node = ast.Name(id=names.pop(), ctx=ast.Load())
while names:
node = ast.Attribute(value=node, attr=names.pop(), ctx=ast.Load())
# Change outermost ctx to the one given to us (inner ones should be Load).
node.ctx = ctx
return node
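# Hedged sketch (illustration only, not part of the original script): a
# dotted name becomes nested Attribute nodes around a single Name node.
def _full_name_node_example():
  node = full_name_node("tf.compat.v1")
  # Outermost node: Attribute(attr="v1"); innermost: Name(id="tf").
  assert isinstance(node, ast.Attribute) and node.attr == "v1"
  return node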
def get_arg_value(node, arg_name, arg_pos=None):
"""Get the value of an argument from a ast.Call node.
This function goes through the positional and keyword arguments to check
whether a given argument was used, and if so, returns its value (the node
representing its value).
This cannot introspect *args or **args, but it safely handles *args in
Python3.5+.
Args:
node: The ast.Call node to extract arg values from.
arg_name: The name of the argument to extract.
arg_pos: The position of the argument (in case it's passed as a positional
argument).
Returns:
A tuple (arg_present, arg_value) containing a boolean indicating whether
the argument is present, and its value in case it is.
"""
# Check keyword args
if arg_name is not None:
for kw in node.keywords:
if kw.arg == arg_name:
return (True, kw.value)
# Check positional args
if arg_pos is not None:
idx = 0
for arg in node.args:
if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):
continue # Can't parse Starred
if idx == arg_pos:
return (True, arg)
idx += 1
return (False, None)
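# Hedged sketch (illustration only): reading an argument's value out of a
# parsed call, whether it was passed by keyword or positionally.
def _get_arg_value_example():
  call = ast.parse("f(1, axis=0)").body[0].value  # the ast.Call node
  present, value = get_arg_value(call, "axis", 1)
  assert present  # "axis" was found as a keyword argument
  return value    # the ast node for the literal 0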
def uses_star_args_in_call(node):
"""Check if an ast.Call node uses arbitrary-length positional *args.
This function works with the AST call node format of Python3.5+
as well as the different AST format of earlier versions of Python.
Args:
node: The ast.Call node to check arg values for.
Returns:
True if the node uses starred variadic positional args or keyword args.
False if it does not.
"""
if sys.version_info[:2] >= (3, 5):
# Check for an *args usage in python 3.5+
for arg in node.args:
if isinstance(arg, ast.Starred):
return True
else:
if node.starargs:
return True
return False
def uses_star_kwargs_in_call(node):
"""Check if an ast.Call node uses arbitrary-length **kwargs.
This function works with the AST call node format of Python3.5+
as well as the different AST format of earlier versions of Python.
Args:
node: The ast.Call node to check arg values for.
Returns:
True if the node uses starred variadic positional args or keyword args.
False if it does not.
"""
if sys.version_info[:2] >= (3, 5):
# Check for a **kwarg usage in python 3.5+
for keyword in node.keywords:
if keyword.arg is None:
return True
else:
if node.kwargs:
return True
return False
def uses_star_args_or_kwargs_in_call(node):
"""Check if an ast.Call node uses arbitrary-length *args or **kwargs.
This function works with the AST call node format of Python3.5+
as well as the different AST format of earlier versions of Python.
Args:
node: The ast.Call node to check arg values for.
Returns:
True if the node uses starred variadic positional args or keyword args.
False if it does not.
"""
return uses_star_args_in_call(node) or uses_star_kwargs_in_call(node)
def excluded_from_module_rename(module, import_rename_spec):
"""Check if this module import should not be renamed.
Args:
module: (string) module name.
import_rename_spec: ImportRename instance.
Returns:
True if this import should not be renamed according to the
import_rename_spec.
"""
for excluded_prefix in import_rename_spec.excluded_prefixes:
if module.startswith(excluded_prefix):
return True
return False
class APIChangeSpec(object):
"""This class defines the transformations that need to happen.
This class must provide the following fields:
* `function_keyword_renames`: maps function names to a map of old -> new
argument names
* `symbol_renames`: maps function names to new function names
* `change_to_function`: a set of function names that have changed (for
notifications)
* `function_reorders`: maps functions whose argument order has changed to the
list of arguments in the new order
* `function_warnings`: maps full names of functions to warnings that will be
printed out if the function is used. (e.g. tf.nn.convolution())
* `function_transformers`: maps function names to custom handlers
* `module_deprecations`: maps module names to warnings that will be printed
if the module is still used after all other transformations have run
* `import_renames`: maps import name (must be a short name without '.')
to ImportRename instance.
For an example, see `TFAPIChangeSpec`.
"""
def preprocess(self, root_node): # pylint: disable=unused-argument
"""Preprocess a parse tree. Return any produced logs and errors."""
return [], []
def clear_preprocessing(self):
"""Restore this APIChangeSpec to before it preprocessed a file.
This is needed if preprocessing a file changed any rewriting rules.
"""
pass
class NoUpdateSpec(APIChangeSpec):
"""A specification of an API change which doesn't change anything."""
def __init__(self):
self.function_handle = {}
self.function_reorders = {}
self.function_keyword_renames = {}
self.symbol_renames = {}
self.function_warnings = {}
self.change_to_function = {}
self.module_deprecations = {}
self.function_transformers = {}
self.import_renames = {}
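# Hedged sketch (illustration only): a minimal spec built on NoUpdateSpec
# that renames one symbol, one keyword argument, and one import.  All of
# the names below are made up for demonstration.
class _ExampleRenameSpec(NoUpdateSpec):
  def __init__(self):
    super(_ExampleRenameSpec, self).__init__()
    self.symbol_renames = {"tf.old_name": "tf.new_name"}
    self.function_keyword_renames = {"tf.old_name": {"dim": "axis"}}
    self.import_renames = {
        "oldmod": ImportRename("newmod", excluded_prefixes=[])}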
class _PastaEditVisitor(ast.NodeVisitor):
"""AST Visitor that processes function calls.
Updates function calls from old API version to new API version using a given
change spec.
"""
def __init__(self, api_change_spec):
self._api_change_spec = api_change_spec
self._log = [] # Holds 4-tuples: severity, line, col, msg.
self._stack = [] # Allow easy access to parents.
# Overridden to maintain a stack of nodes to allow for parent access
def visit(self, node):
self._stack.append(node)
super(_PastaEditVisitor, self).visit(node)
self._stack.pop()
@property
def errors(self):
return [log for log in self._log if log[0] == ERROR]
@property
def warnings(self):
return [log for log in self._log if log[0] == WARNING]
@property
def warnings_and_errors(self):
return [log for log in self._log if log[0] in (WARNING, ERROR)]
@property
def info(self):
return [log for log in self._log if log[0] == INFO]
@property
def log(self):
return self._log
def add_log(self, severity, lineno, col, msg):
self._log.append((severity, lineno, col, msg))
print("%s line %d:%d: %s" % (severity, lineno, col, msg))
def add_logs(self, logs):
"""Record a log and print it.
The log should be a tuple `(severity, lineno, col_offset, msg)`, which will
be printed and recorded. It is part of the log available in the `self.log`
property.
Args:
logs: The logs to add. Must be a list of tuples
`(severity, lineno, col_offset, msg)`.
"""
self._log.extend(logs)
for log in logs:
print("%s line %d:%d: %s" % log)
def _get_applicable_entries(self, transformer_field, full_name, name):
"""Get all list entries indexed by name that apply to full_name or name."""
# Transformers are indexed to full name, name, or no name
# as a performance optimization.
function_transformers = getattr(self._api_change_spec,
transformer_field, {})
glob_name = "*." + six.ensure_str(name) if name else None
transformers = []
if full_name in function_transformers:
transformers.append(function_transformers[full_name])
if glob_name in function_transformers:
transformers.append(function_transformers[glob_name])
if "*" in function_transformers:
transformers.append(function_transformers["*"])
return transformers
def _get_applicable_dict(self, transformer_field, full_name, name):
"""Get all dict entries indexed by name that apply to full_name or name."""
# Transformers are indexed to full name, name, or no name
# as a performance optimization.
function_transformers = getattr(self._api_change_spec,
transformer_field, {})
glob_name = "*." + six.ensure_str(name) if name else None
transformers = function_transformers.get("*", {}).copy()
transformers.update(function_transformers.get(glob_name, {}))
transformers.update(function_transformers.get(full_name, {}))
return transformers
def _get_full_name(self, node):
"""Traverse an Attribute node to generate a full name, e.g., "tf.foo.bar".
This is the inverse of `full_name_node`.
Args:
node: A Node of type Attribute.
Returns:
a '.'-delimited full-name or None if node was not Attribute or Name.
      i.e. `(foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
"""
curr = node
items = []
while not isinstance(curr, ast.Name):
if not isinstance(curr, ast.Attribute):
return None
items.append(curr.attr)
curr = curr.value
items.append(curr.id)
return ".".join(reversed(items))
def _maybe_add_warning(self, node, full_name):
"""Adds an error to be printed about full_name at node."""
function_warnings = self._api_change_spec.function_warnings
if full_name in function_warnings:
level, message = function_warnings[full_name]
message = six.ensure_str(message).replace("<function name>", full_name)
self.add_log(level, node.lineno, node.col_offset,
"%s requires manual check. %s" % (full_name, message))
return True
else:
return False
def _maybe_add_module_deprecation_warning(self, node, full_name, whole_name):
"""Adds a warning if full_name is a deprecated module."""
warnings = self._api_change_spec.module_deprecations
if full_name in warnings:
level, message = warnings[full_name]
message = six.ensure_str(message).replace("<function name>",
six.ensure_str(whole_name))
self.add_log(level, node.lineno, node.col_offset,
"Using member %s in deprecated module %s. %s" % (whole_name,
full_name,
message))
return True
else:
return False
def _maybe_add_call_warning(self, node, full_name, name):
"""Print a warning when specific functions are called with selected args.
The function _print_warning_for_function matches the full name of the called
function, e.g., tf.foo.bar(). This function matches the function name that
is called, as long as the function is an attribute. For example,
`tf.foo.bar()` and `foo.bar()` are matched, but not `bar()`.
Args:
node: ast.Call object
full_name: The precomputed full name of the callable, if one exists, None
otherwise.
name: The precomputed name of the callable, if one exists, None otherwise.
Returns:
Whether an error was recorded.
"""
# Only look for *.-warnings here, the other will be handled by the Attribute
# visitor. Also, do not warn for bare functions, only if the call func is
# an attribute.
warned = False
if isinstance(node.func, ast.Attribute):
warned = self._maybe_add_warning(node, "*." + six.ensure_str(name))
# All arg warnings are handled here, since only we have the args
arg_warnings = self._get_applicable_dict("function_arg_warnings",
full_name, name)
variadic_args = uses_star_args_or_kwargs_in_call(node)
for (kwarg, arg), (level, warning) in sorted(arg_warnings.items()):
      present, _ = get_arg_value(node, kwarg, arg)
      present = present or variadic_args
if present:
warned = True
warning_message = six.ensure_str(warning).replace(
"<function name>", six.ensure_str(full_name or name))
template = "%s called with %s argument, requires manual check: %s"
if variadic_args:
template = ("%s called with *args or **kwargs that may include %s, "
"requires manual check: %s")
self.add_log(level, node.lineno, node.col_offset,
template % (full_name or name, kwarg, warning_message))
return warned
def _maybe_rename(self, parent, node, full_name):
"""Replace node (Attribute or Name) with a node representing full_name."""
new_name = self._api_change_spec.symbol_renames.get(full_name, None)
if new_name:
self.add_log(INFO, node.lineno, node.col_offset,
"Renamed %r to %r" % (full_name, new_name))
new_node = full_name_node(new_name, node.ctx)
ast.copy_location(new_node, node)
pasta.ast_utils.replace_child(parent, node, new_node)
return True
else:
return False
def _maybe_change_to_function_call(self, parent, node, full_name):
"""Wraps node (typically, an Attribute or Expr) in a Call."""
if full_name in self._api_change_spec.change_to_function:
if not isinstance(parent, ast.Call):
# ast.Call's constructor is really picky about how many arguments it
# wants, and also, it changed between Py2 and Py3.
if six.PY2:
new_node = ast.Call(node, [], [], None, None)
else:
new_node = ast.Call(node, [], [])
pasta.ast_utils.replace_child(parent, node, new_node)
ast.copy_location(new_node, node)
self.add_log(INFO, node.lineno, node.col_offset,
"Changed %r to a function call" % full_name)
return True
return False
def _maybe_add_arg_names(self, node, full_name):
"""Make args into keyword args if function called full_name requires it."""
function_reorders = self._api_change_spec.function_reorders
if full_name in function_reorders:
if uses_star_args_in_call(node):
self.add_log(WARNING, node.lineno, node.col_offset,
"(Manual check required) upgrading %s may require "
"re-ordering the call arguments, but it was passed "
"variable-length positional *args. The upgrade "
"script cannot handle these automatically." % full_name)
reordered = function_reorders[full_name]
new_keywords = []
idx = 0
for arg in node.args:
if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):
continue # Can't move Starred to keywords
keyword_arg = reordered[idx]
keyword = ast.keyword(arg=keyword_arg, value=arg)
new_keywords.append(keyword)
idx += 1
if new_keywords:
self.add_log(INFO, node.lineno, node.col_offset,
"Added keywords to args of function %r" % full_name)
node.args = []
node.keywords = new_keywords + (node.keywords or [])
return True
return False
def _maybe_modify_args(self, node, full_name, name):
"""Rename keyword args if the function called full_name requires it."""
renamed_keywords = self._get_applicable_dict("function_keyword_renames",
full_name, name)
if not renamed_keywords:
return False
if uses_star_kwargs_in_call(node):
self.add_log(WARNING, node.lineno, node.col_offset,
"(Manual check required) upgrading %s may require "
"renaming or removing call arguments, but it was passed "
"variable-length *args or **kwargs. The upgrade "
"script cannot handle these automatically." %
(full_name or name))
modified = False
new_keywords = []
for keyword in node.keywords:
argkey = keyword.arg
if argkey in renamed_keywords:
modified = True
if renamed_keywords[argkey] is None:
lineno = getattr(keyword, "lineno", node.lineno)
col_offset = getattr(keyword, "col_offset", node.col_offset)
self.add_log(INFO, lineno, col_offset,
"Removed argument %s for function %s" % (
argkey, full_name or name))
else:
keyword.arg = renamed_keywords[argkey]
lineno = getattr(keyword, "lineno", node.lineno)
col_offset = getattr(keyword, "col_offset", node.col_offset)
self.add_log(INFO, lineno, col_offset,
"Renamed keyword argument for %s from %s to %s" % (
full_name, argkey, renamed_keywords[argkey]))
new_keywords.append(keyword)
else:
new_keywords.append(keyword)
if modified:
node.keywords = new_keywords
return modified
def visit_Call(self, node): # pylint: disable=invalid-name
"""Handle visiting a call node in the AST.
Args:
node: Current Node
"""
assert self._stack[-1] is node
# Get the name for this call, so we can index stuff with it.
full_name = self._get_full_name(node.func)
if full_name:
name = full_name.split(".")[-1]
elif isinstance(node.func, ast.Name):
name = node.func.id
elif isinstance(node.func, ast.Attribute):
name = node.func.attr
else:
name = None
# Call standard transformers for this node.
# Make sure warnings come first, since args or names triggering warnings
# may be removed by the other transformations.
self._maybe_add_call_warning(node, full_name, name)
# Make all args into kwargs
self._maybe_add_arg_names(node, full_name)
# Argument name changes or deletions
self._maybe_modify_args(node, full_name, name)
# Call transformers. These have the ability to modify the node, and if they
# do, will return the new node they created (or the same node if they just
    # changed it). They are given the parent, but we will take care of
# integrating their changes into the parent if they return a new node.
#
# These are matched on the old name, since renaming is performed by the
# Attribute visitor, which happens later.
transformers = self._get_applicable_entries("function_transformers",
full_name, name)
parent = self._stack[-2]
if transformers:
if uses_star_args_or_kwargs_in_call(node):
self.add_log(WARNING, node.lineno, node.col_offset,
"(Manual check required) upgrading %s may require "
"modifying call arguments, but it was passed "
"variable-length *args or **kwargs. The upgrade "
"script cannot handle these automatically." %
(full_name or name))
for transformer in transformers:
logs = []
new_node = transformer(parent, node, full_name, name, logs)
self.add_logs(logs)
if new_node and new_node is not node:
pasta.ast_utils.replace_child(parent, node, new_node)
node = new_node
self._stack[-1] = node
self.generic_visit(node)
def visit_Attribute(self, node): # pylint: disable=invalid-name
"""Handle bare Attributes i.e. [tf.foo, tf.bar]."""
assert self._stack[-1] is node
full_name = self._get_full_name(node)
if full_name:
parent = self._stack[-2]
# Make sure the warning comes first, otherwise the name may have changed
self._maybe_add_warning(node, full_name)
      # Once we have made a modification, the node is invalid and not worth
      # inspecting further. Also, we only perform modifications for simple
      # nodes, so there'd be no point in descending further.
if self._maybe_rename(parent, node, full_name):
return
if self._maybe_change_to_function_call(parent, node, full_name):
return
# The isinstance check is enough -- a bare Attribute is never root.
i = 2
while isinstance(self._stack[-i], ast.Attribute):
i += 1
whole_name = pasta.dump(self._stack[-(i-1)])
self._maybe_add_module_deprecation_warning(node, full_name, whole_name)
self.generic_visit(node)
def visit_Import(self, node): # pylint: disable=invalid-name
"""Handle visiting an import node in the AST.
Args:
node: Current Node
"""
new_aliases = []
import_updated = False
import_renames = getattr(self._api_change_spec, "import_renames", {})
max_submodule_depth = getattr(self._api_change_spec, "max_submodule_depth",
1)
inserts_after_imports = getattr(self._api_change_spec,
"inserts_after_imports", {})
# This loop processes imports in the format
# import foo as f, bar as b
for import_alias in node.names:
all_import_components = six.ensure_str(import_alias.name).split(".")
# Look for rename, starting with longest import levels.
found_update = False
for i in reversed(list(range(1, max_submodule_depth + 1))):
import_component = all_import_components[0]
for j in range(1, min(i, len(all_import_components))):
import_component += "." + six.ensure_str(all_import_components[j])
import_rename_spec = import_renames.get(import_component, None)
if not import_rename_spec or excluded_from_module_rename(
import_alias.name, import_rename_spec):
continue
new_name = (
import_rename_spec.new_name +
import_alias.name[len(import_component):])
# If current import is
# import foo
# then new import should preserve imported name:
# import new_foo as foo
# This happens when module has just one component.
new_asname = import_alias.asname
if not new_asname and "." not in import_alias.name:
new_asname = import_alias.name
new_alias = ast.alias(name=new_name, asname=new_asname)
new_aliases.append(new_alias)
import_updated = True
found_update = True
# Insert any followup lines that should happen after this import.
full_import = (import_alias.name, import_alias.asname)
insert_offset = 1
for line_to_insert in inserts_after_imports.get(full_import, []):
assert self._stack[-1] is node
parent = self._stack[-2]
new_line_node = pasta.parse(line_to_insert)
ast.copy_location(new_line_node, node)
parent.body.insert(
parent.body.index(node) + insert_offset, new_line_node)
insert_offset += 1
# Insert a newline after the import if necessary
old_suffix = pasta.base.formatting.get(node, "suffix")
if old_suffix is None:
old_suffix = os.linesep
if os.linesep not in old_suffix:
pasta.base.formatting.set(node, "suffix",
six.ensure_str(old_suffix) + os.linesep)
# Apply indentation to new node.
pasta.base.formatting.set(new_line_node, "prefix",
pasta.base.formatting.get(node, "prefix"))
pasta.base.formatting.set(new_line_node, "suffix", os.linesep)
self.add_log(
INFO, node.lineno, node.col_offset,
"Adding `%s` after import of %s" %
(new_line_node, import_alias.name))
# Find one match, break
if found_update:
break
# No rename is found for all levels
if not found_update:
new_aliases.append(import_alias) # no change needed
# Replace the node if at least one import needs to be updated.
if import_updated:
assert self._stack[-1] is node
parent = self._stack[-2]
new_node = ast.Import(new_aliases)
ast.copy_location(new_node, node)
pasta.ast_utils.replace_child(parent, node, new_node)
self.add_log(
INFO, node.lineno, node.col_offset,
"Changed import from %r to %r." %
(pasta.dump(node), pasta.dump(new_node)))
self.generic_visit(node)
def visit_ImportFrom(self, node): # pylint: disable=invalid-name
"""Handle visiting an import-from node in the AST.
Args:
node: Current Node
"""
if not node.module:
self.generic_visit(node)
return
from_import = node.module
# Look for rename based on first component of from-import.
# i.e. based on foo in foo.bar.
from_import_first_component = six.ensure_str(from_import).split(".")[0]
import_renames = getattr(self._api_change_spec, "import_renames", {})
import_rename_spec = import_renames.get(from_import_first_component, None)
if not import_rename_spec:
self.generic_visit(node)
return
# Split module aliases into the ones that require import update
# and those that don't. For e.g. if we want to rename "a" to "b"
# unless we import "a.c" in the following:
# from a import c, d
# we want to update import for "d" but not for "c".
updated_aliases = []
same_aliases = []
for import_alias in node.names:
full_module_name = "%s.%s" % (from_import, import_alias.name)
if excluded_from_module_rename(full_module_name, import_rename_spec):
same_aliases.append(import_alias)
else:
updated_aliases.append(import_alias)
if not updated_aliases:
self.generic_visit(node)
return
assert self._stack[-1] is node
parent = self._stack[-2]
# Replace first component of from-import with new name.
new_from_import = (
import_rename_spec.new_name +
from_import[len(from_import_first_component):])
updated_node = ast.ImportFrom(new_from_import, updated_aliases, node.level)
ast.copy_location(updated_node, node)
pasta.ast_utils.replace_child(parent, node, updated_node)
# If some imports had to stay the same, add another import for them.
additional_import_log = ""
if same_aliases:
same_node = ast.ImportFrom(from_import, same_aliases, node.level,
col_offset=node.col_offset, lineno=node.lineno)
ast.copy_location(same_node, node)
parent.body.insert(parent.body.index(updated_node), same_node)
# Apply indentation to new node.
pasta.base.formatting.set(
same_node, "prefix",
pasta.base.formatting.get(updated_node, "prefix"))
additional_import_log = " and %r" % pasta.dump(same_node)
self.add_log(
INFO, node.lineno, node.col_offset,
"Changed import from %r to %r%s." %
(pasta.dump(node),
pasta.dump(updated_node),
additional_import_log))
self.generic_visit(node)
class AnalysisResult(object):
"""This class represents an analysis result and how it should be logged.
This class must provide the following fields:
* `log_level`: The log level to which this detection should be logged
* `log_message`: The message that should be logged for this detection
For an example, see `VersionedTFImport`.
"""
class APIAnalysisSpec(object):
"""This class defines how `AnalysisResult`s should be generated.
It specifies how to map imports and symbols to `AnalysisResult`s.
This class must provide the following fields:
* `symbols_to_detect`: maps function names to `AnalysisResult`s
* `imports_to_detect`: maps imports represented as (full module name, alias)
tuples to `AnalysisResult`s
For an example, see `TFAPIImportAnalysisSpec`.
"""
class PastaAnalyzeVisitor(_PastaEditVisitor):
"""AST Visitor that looks for specific API usage without editing anything.
This is used before any rewriting is done to detect if any symbols are used
that require changing imports or disabling rewriting altogether.
"""
def __init__(self, api_analysis_spec):
super(PastaAnalyzeVisitor, self).__init__(NoUpdateSpec())
self._api_analysis_spec = api_analysis_spec
self._results = [] # Holds AnalysisResult objects
@property
def results(self):
return self._results
def add_result(self, analysis_result):
self._results.append(analysis_result)
def visit_Attribute(self, node): # pylint: disable=invalid-name
"""Handle bare Attributes i.e. [tf.foo, tf.bar]."""
full_name = self._get_full_name(node)
if full_name:
detection = self._api_analysis_spec.symbols_to_detect.get(full_name, None)
if detection:
self.add_result(detection)
self.add_log(
detection.log_level, node.lineno, node.col_offset,
detection.log_message)
self.generic_visit(node)
def visit_Import(self, node): # pylint: disable=invalid-name
"""Handle visiting an import node in the AST.
Args:
node: Current Node
"""
for import_alias in node.names:
      # Detect based on full import name and alias.
full_import = (import_alias.name, import_alias.asname)
detection = (self._api_analysis_spec
.imports_to_detect.get(full_import, None))
if detection:
self.add_result(detection)
self.add_log(
detection.log_level, node.lineno, node.col_offset,
detection.log_message)
self.generic_visit(node)
def visit_ImportFrom(self, node): # pylint: disable=invalid-name
"""Handle visiting an import-from node in the AST.
Args:
node: Current Node
"""
if not node.module:
self.generic_visit(node)
return
from_import = node.module
for import_alias in node.names:
      # Detect based on full import name (module and alias).
full_module_name = "%s.%s" % (from_import, import_alias.name)
full_import = (full_module_name, import_alias.asname)
detection = (self._api_analysis_spec
.imports_to_detect.get(full_import, None))
if detection:
self.add_result(detection)
self.add_log(
detection.log_level, node.lineno, node.col_offset,
detection.log_message)
self.generic_visit(node)
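# Illustrative usage sketch (uses the hypothetical spec above): run the
# analyzer over parsed source before attempting any rewrite.
#
#   tree = pasta.parse(source_code)
#   analyzer = PastaAnalyzeVisitor(_ExampleAnalysisSpec())
#   analyzer.visit(tree)
#   for result in analyzer.results:
#     print(result.log_message)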
class ASTCodeUpgrader(object):
"""Handles upgrading a set of Python files using a given API change spec."""
def __init__(self, api_change_spec):
if not isinstance(api_change_spec, APIChangeSpec):
raise TypeError("Must pass APIChangeSpec to ASTCodeUpgrader, got %s" %
type(api_change_spec))
self._api_change_spec = api_change_spec
def process_file(self,
in_filename,
out_filename,
no_change_to_outfile_on_error=False):
"""Process the given python file for incompatible changes.
Args:
in_filename: filename to parse
out_filename: output file to write to
      no_change_to_outfile_on_error: do not modify the output file if errors
        occur during processing
Returns:
A tuple representing number of files processed, log of actions, errors
"""
    # Write to a temporary file, just in case we are doing an in-place modify.
# pylint: disable=g-backslash-continuation
with open(in_filename, "r") as in_file, \
tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
ret = self.process_opened_file(in_filename, in_file, out_filename,
temp_file)
# pylint: enable=g-backslash-continuation
if no_change_to_outfile_on_error and ret[0] == 0:
os.remove(temp_file.name)
else:
shutil.move(temp_file.name, out_filename)
return ret
def format_log(self, log, in_filename):
log_string = "%d:%d: %s: %s" % (log[1], log[2], log[0], log[3])
if in_filename:
return six.ensure_str(in_filename) + ":" + log_string
else:
return log_string
def update_string_pasta(self, text, in_filename):
"""Updates a file using pasta."""
try:
t = pasta.parse(text)
except (SyntaxError, ValueError, TypeError):
log = ["ERROR: Failed to parse.\n" + traceback.format_exc()]
return 0, "", log, []
preprocess_logs, preprocess_errors = self._api_change_spec.preprocess(t)
visitor = _PastaEditVisitor(self._api_change_spec)
visitor.visit(t)
self._api_change_spec.clear_preprocessing()
logs = [self.format_log(log, None) for log in (preprocess_logs +
visitor.log)]
errors = [self.format_log(error, in_filename)
for error in (preprocess_errors +
visitor.warnings_and_errors)]
return 1, pasta.dump(t), logs, errors
def _format_log(self, log, in_filename, out_filename):
text = six.ensure_str("-" * 80) + "\n"
text += "Processing file %r\n outputting to %r\n" % (in_filename,
out_filename)
text += six.ensure_str("-" * 80) + "\n\n"
text += "\n".join(log) + "\n"
text += six.ensure_str("-" * 80) + "\n\n"
return text
def process_opened_file(self, in_filename, in_file, out_filename, out_file):
"""Process the given python file for incompatible changes.
This function is split out to facilitate StringIO testing from
tf_upgrade_test.py.
Args:
in_filename: filename to parse
in_file: opened file (or StringIO)
out_filename: output file to write to
out_file: opened file (or StringIO)
Returns:
A tuple representing number of files processed, log of actions, errors
"""
lines = in_file.readlines()
processed_file, new_file_content, log, process_errors = (
self.update_string_pasta("".join(lines), in_filename))
if out_file and processed_file:
out_file.write(new_file_content)
return (processed_file,
self._format_log(log, in_filename, out_filename),
process_errors)
def process_tree(self, root_directory, output_root_directory,
copy_other_files):
"""Processes upgrades on an entire tree of python files in place.
    Note that only Python files are processed. If you have custom code in
    other languages, you will need to upgrade those files manually.
Args:
root_directory: Directory to walk and process.
      output_root_directory: Directory to use as the base of the output tree.
copy_other_files: Copy files that are not touched by this converter.
Returns:
A tuple of files processed, the report string for all files, and a dict
mapping filenames to errors encountered in that file.
"""
if output_root_directory == root_directory:
return self.process_tree_inplace(root_directory)
# make sure output directory doesn't exist
if output_root_directory and os.path.exists(output_root_directory):
print("Output directory %r must not already exist." %
(output_root_directory))
sys.exit(1)
# make sure output directory does not overlap with root_directory
norm_root = os.path.split(os.path.normpath(root_directory))
norm_output = os.path.split(os.path.normpath(output_root_directory))
if norm_root == norm_output:
print("Output directory %r same as input directory %r" %
(root_directory, output_root_directory))
sys.exit(1)
# Collect list of files to process (we do this to correctly handle if the
# user puts the output directory in some sub directory of the input dir)
files_to_process = []
files_to_copy = []
for dir_name, _, file_list in os.walk(root_directory):
py_files = [f for f in file_list if six.ensure_str(f).endswith(".py")]
copy_files = [
f for f in file_list if not six.ensure_str(f).endswith(".py")
]
for filename in py_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(output_root_directory,
os.path.relpath(fullpath,
root_directory))
files_to_process.append((fullpath, fullpath_output))
if copy_other_files:
for filename in copy_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(output_root_directory,
os.path.relpath(
fullpath, root_directory))
files_to_copy.append((fullpath, fullpath_output))
file_count = 0
tree_errors = {}
report = ""
report += six.ensure_str(("=" * 80)) + "\n"
report += "Input tree: %r\n" % root_directory
report += six.ensure_str(("=" * 80)) + "\n"
for input_path, output_path in files_to_process:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
if os.path.islink(input_path):
link_target = os.readlink(input_path)
link_target_output = os.path.join(
output_root_directory, os.path.relpath(link_target, root_directory))
if (link_target, link_target_output) in files_to_process:
# Create a link to the new location of the target file
os.symlink(link_target_output, output_path)
else:
report += "Copying symlink %s without modifying its target %s" % (
input_path, link_target)
os.symlink(link_target, output_path)
continue
file_count += 1
_, l_report, l_errors = self.process_file(input_path, output_path)
tree_errors[input_path] = l_errors
report += l_report
for input_path, output_path in files_to_copy:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
shutil.copy(input_path, output_path)
return file_count, report, tree_errors
def process_tree_inplace(self, root_directory):
"""Process a directory of python files in place."""
files_to_process = []
for dir_name, _, file_list in os.walk(root_directory):
py_files = [
os.path.join(dir_name, f)
for f in file_list
if six.ensure_str(f).endswith(".py")
]
files_to_process += py_files
file_count = 0
tree_errors = {}
report = ""
report += six.ensure_str(("=" * 80)) + "\n"
report += "Input tree: %r\n" % root_directory
report += six.ensure_str(("=" * 80)) + "\n"
for path in files_to_process:
if os.path.islink(path):
report += "Skipping symlink %s.\n" % path
continue
file_count += 1
_, l_report, l_errors = self.process_file(path, path)
tree_errors[path] = l_errors
report += l_report
return file_count, report, tree_errors
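# Illustrative usage sketch (the spec subclass name is hypothetical): upgrade
# a single file, writing the converted source back in place.
#
#   upgrader = ASTCodeUpgrader(MyChangeSpec())  # some APIChangeSpec subclass
#   count, report, errors = upgrader.process_file("model.py", "model.py")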
| {
"content_hash": "1b5d6d0fe21f08029e1358a2619e9d9b",
"timestamp": "",
"source": "github",
"line_count": 1099,
"max_line_length": 80,
"avg_line_length": 36.101000909918106,
"alnum_prop": 0.6327410207939509,
"repo_name": "renyi533/tensorflow",
"id": "fa1e8def53dce52a7ea43c008d5cb8620152632f",
"size": "40392",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/tools/compatibility/ast_edits.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31572"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "903309"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82507951"
},
{
"name": "CMake",
"bytes": "6967"
},
{
"name": "Dockerfile",
"bytes": "113964"
},
{
"name": "Go",
"bytes": "1871425"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "988219"
},
{
"name": "Jupyter Notebook",
"bytes": "550861"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "2073744"
},
{
"name": "Makefile",
"bytes": "66796"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "319021"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37811412"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "6846"
},
{
"name": "Shell",
"bytes": "696058"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3655758"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""Test imageactions.py for vimiv's test suite."""
import os
import shutil
import time
from unittest import TestCase, main
import vimiv.imageactions as imageactions
from gi import require_version
require_version('GdkPixbuf', '2.0')
from gi.repository import GdkPixbuf
from vimiv_testcase import compare_files
class ImageActionsTest(TestCase):
"""Imageactions Tests."""
def setUp(self):
self.working_directory = os.getcwd()
os.chdir("vimiv/testimages/")
self.orig = os.path.abspath("arch_001.jpg")
self.filename = os.path.abspath("image_to_edit.jpg")
self.filename_2 = os.path.abspath("image_to_edit_2.jpg")
self._waiting = False # Used to wait for autorotate
shutil.copyfile(self.orig, self.filename)
shutil.copyfile(self.orig, self.filename_2)
def test_rotate(self):
"""Rotate image file."""
def do_rotate_test(rotate_int):
"""Run the rotation test.
Args:
rotate_int: Number defining the rotation.
"""
pb = GdkPixbuf.Pixbuf.new_from_file(self.filename)
orientation_before = pb.get_width() < pb.get_height()
imageactions.rotate_file(self.filename, rotate_int)
pb = GdkPixbuf.Pixbuf.new_from_file(self.filename)
orientation_after = pb.get_width() < pb.get_height()
if rotate_int in [1, 3]:
self.assertNotEqual(orientation_before, orientation_after)
elif rotate_int == 2:
self.assertEqual(orientation_before, orientation_after)
# Rotate counterclockwise
do_rotate_test(1)
# Rotate clockwise
do_rotate_test(3)
# Images are now equal again
self.assertTrue(compare_files(self.orig, self.filename))
# Rotate 180
do_rotate_test(2)
# Images are not equal
self.assertFalse(compare_files(self.orig, self.filename))
def test_flip(self):
"""Flipping of files."""
# Images equal before the flip
self.assertTrue(compare_files(self.orig, self.filename))
# Images differ after the flip
imageactions.flip_file(self.filename, False)
self.assertFalse(compare_files(self.orig, self.filename))
# Images equal after flipping again
imageactions.flip_file(self.filename, False)
self.assertTrue(compare_files(self.orig, self.filename))
# Same for horizontal flip
# Images equal before the flip
self.assertTrue(compare_files(self.orig, self.filename))
# Images differ after the flip
imageactions.flip_file(self.filename, True)
self.assertFalse(compare_files(self.orig, self.filename))
# Images equal after flipping again
imageactions.flip_file(self.filename, True)
self.assertTrue(compare_files(self.orig, self.filename))
def test_autorotate(self):
"""Autorotate files."""
pb = GdkPixbuf.Pixbuf.new_from_file(self.filename)
orientation_before = pb.get_width() < pb.get_height()
autorotate = imageactions.Autorotate([self.filename])
autorotate.connect("completed", self._on_autorotate_completed)
autorotate.run()
# Wait for it to complete
self._waiting = True
while self._waiting:
time.sleep(0.05)
pb = GdkPixbuf.Pixbuf.new_from_file(self.filename)
orientation_after = pb.get_width() < pb.get_height()
self.assertNotEqual(orientation_before, orientation_after)
def _on_autorotate_completed(self, autorotate, amount):
self._waiting = False
def tearDown(self):
os.chdir(self.working_directory)
os.remove(self.filename)
os.remove(self.filename_2)
if __name__ == "__main__":
main()
| {
"content_hash": "a7cad6625f5723b5f3ad52a457e97cae",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 74,
"avg_line_length": 37.35294117647059,
"alnum_prop": 0.6377952755905512,
"repo_name": "karlch/vimiv",
"id": "e6fc3388a6e354104a0bbcb1f85e2dd972b6addb",
"size": "3860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/imageactions_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "11958"
},
{
"name": "Makefile",
"bytes": "2608"
},
{
"name": "Python",
"bytes": "387798"
},
{
"name": "Shell",
"bytes": "2371"
}
],
"symlink_target": ""
} |
'''
Dynamically generating the 4th movement of Ruth Crawford Seeger's String Quartet 1931
'''
from music21 import converter
from music21 import meter
from music21 import note
from music21 import stream
import copy
def lowerLines():
restLengths = [0, 16, 12, 11, 10, 7, 6, 7, 6, 5, 4, 3, 8, 10, 12, 14, 16, 17, 18, 19, 20]
correctTranspositions = [-1, 2, -3, -3, 1, 1, 6, 3, -2] # correct the first note of rotations 13-21
fixLastNoteLengths = {11: 4.5, 12: 3, 13: 2.5, 14: 2, 15: 1.5, 20: 10.5}
currentNote = 0
rotationNumber = 1
myRow = stream.Part()
for phraseNumber in range(1,21):
myRow.append(note.Rest(quarterLength=restLengths[phraseNumber]/2.0))
if phraseNumber == 8: ## inconsistency in RCS's scheme
currentNote += 2
for addNote in range(21 - phraseNumber):
if rotationNumber <= 10 or rotationNumber >= 20:
#default
appendNote = copy.deepcopy(rowNotes[currentNote % 10])
else: # second set of rotations is up a step:
appendNote = rowNotes[currentNote % 10].transpose(2)
# if phraseNumber == 8 and addNote == 9: # mistaken transpositions by RCS
# appendNote = appendNote.transpose(-1)
# appendNote.lyrics.append(note.Lyric(text="*", number=3))
#
# elif phraseNumber == 9 and addNote == 6:
# appendNote = appendNote.transpose(2)
# appendNote.lyrics.append(note.Lyric(text="*", number=3))
if addNote == 0:
if phraseNumber != 8:
appendNote.lyrics.append(note.Lyric(text="p" + str(phraseNumber), number=1))
else:
appendNote.lyrics.append(note.Lyric(text="p8*", number=1))
if (currentNote % 10 == (rotationNumber + 8) % 10) and (currentNote != 0):
currentNote += 2
rotationNumber += 1
else:
if (currentNote % 10 == (rotationNumber + 9) % 10):
appendNote.lyrics.append(note.Lyric(text="r" + str(rotationNumber), number=2))
if rotationNumber in range(13, 22):
appendNote.transpose(correctTranspositions[rotationNumber-13], inPlace = True)
appendNote.pitch.simplifyEnharmonic(inPlace = True)
appendNote.lyrics.append(note.Lyric(text="*", number=3))
currentNote += 1
if addNote == 20-phraseNumber: # correct Last Notes
#if phraseNumber == 12: # bug in Finale for accidental display?
# appendNote.pitch.accidental.displayStatus = True
if phraseNumber in fixLastNoteLengths:
appendNote.quarterLength = fixLastNoteLengths[phraseNumber]
myRow.append(appendNote)
    # Retrograde: append the row reversed, each note transposed up an
    # augmented unison (A1).
totalNotes = len(myRow)
for i in range(2, totalNotes+1): #skip last note
el = myRow[totalNotes-i]
if 'Note' in el.classes:
elNote = el.transpose('A1')
elNote.pitch.simplifyEnharmonic(inPlace = True)
elNote.lyrics = []
myRow.append(elNote)
else:
elRest = copy.deepcopy(el) # rests
if i == 2:
elRest.quarterLength=11.5
myRow.append(elRest)
myRow.insert(0, meter.TimeSignature('2/2'))
myRow.show()
if __name__ == '__main__':
row = converter.parse('tinynotation: 2/2 d8 e f e- f# a a- g d- c')
rowNotes = row.notes
lowerLines()
| {
"content_hash": "b18e6f7375012f67e1fc279b4e514fbe",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 103,
"avg_line_length": 43.02325581395349,
"alnum_prop": 0.55,
"repo_name": "arnavd96/Cinemiezer",
"id": "cc25f0a0e286aec11defa9da89d248bc929ea0d9",
"size": "3725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myvenv/lib/python3.4/site-packages/music21/demos/composition/seeger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "300501"
},
{
"name": "C++",
"bytes": "14430"
},
{
"name": "CSS",
"bytes": "105126"
},
{
"name": "FORTRAN",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "290903"
},
{
"name": "JavaScript",
"bytes": "154747"
},
{
"name": "Jupyter Notebook",
"bytes": "558334"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "37092739"
},
{
"name": "Shell",
"bytes": "3668"
},
{
"name": "TeX",
"bytes": "1527"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_rediscache_facts
version_added: "2.8"
short_description: Get Azure Cache for Redis instance facts
description:
- Get facts for Azure Cache for Redis instance.
options:
resource_group:
description:
- The resource group to search for the desired Azure Cache for Redis
required: True
name:
description:
- Limit results to a specific Azure Cache for Redis.
return_access_keys:
description:
            - Indicate whether to return access keys of the Azure Cache for Redis.
default: False
type: bool
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Yunge Zhu (@yungezz)"
'''
EXAMPLES = '''
- name: Get Azure Cache for Redis by name
azure_rm_rediscache_facts:
resource_group: myResourceGroup
name: myRedis
- name: Get Azure Cache for Redis with access keys by name
azure_rm_rediscache_facts:
resource_group: myResourceGroup
name: myRedis
return_access_keys: true
- name: Get Azure Cache for Redis in specific resource group
azure_rm_rediscache_facts:
resource_group: myResourceGroup
'''
RETURN = '''
rediscaches:
description: List of Azure Cache for Redis instances.
returned: always
type: complex
contains:
resource_group:
description:
- Name of a resource group where the Azure Cache for Redis belongs to.
returned: always
type: str
sample: myResourceGroup
name:
description:
- Name of the Azure Cache for Redis.
returned: always
type: str
sample: myRedis
id:
description:
- Id of the Azure Cache for Redis.
returned: always
type: str
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Cache/Redis/myRedis
provisioning_state:
description:
                - Provisioning state of the Azure Cache for Redis.
returned: always
type: str
sample: Creating
location:
description:
- Location of the Azure Cache for Redis.
type: str
sample: WestUS
enable_non_ssl_port:
description:
- Specifies whether the non-ssl Redis server port (6379) is enabled.
type: bool
sample: false
sku:
description:
- Dict of sku information.
type: dict
contains:
name:
description: Name of the sku.
returned: always
type: str
sample: standard
size:
description: Size of the Azure Cache for Redis.
returned: always
type: str
sample: C1
static_ip:
description:
- Static IP address.
type: str
sample: 10.75.0.11
subnet:
description:
- The full resource ID of a subnet in a virtual network to deploy the Azure Cache for Redis in.
type: str
sample:
- "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/Microsoft.Network/VirtualNetworks/myVirtualNetwo
rk/subnets/mySubnet"
configuration:
description:
- Dict of redis configuration.
type: dict
sample: maxmeory_reserved
host_name:
description:
- Redis host name.
type: str
sample: testRedis.redis.cache.windows.net
shard_count:
description:
- The number of shards on a Premium Cluster Cache.
type: int
sample: 1
tenant_settings:
description:
- Dict of tenant settings.
type: dict
tags:
description:
- List of tags.
type: list
sample:
- foo
access_keys:
description:
- Azure Cache for Redis access keys.
type: dict
returned: when C(return_access_keys) is true.
contains:
primary:
                    description: The current primary key that clients can use to authenticate with the Redis cache.
type: str
sample: X2xXXxx7xxxxxx5xxxx0xxxxx75xxxxxxxxXXXxxxxx=
secondary:
                    description: The current secondary key that clients can use to authenticate with the Redis cache.
type: str
sample: X2xXXxx7xxxxxx5xxxx0xxxxx75xxxxxxxxXXXxxxxx=
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from azure.common import AzureHttpError
from azure.mgmt.redis import RedisManagementClient
from msrestazure.azure_exceptions import CloudError
except ImportError:
# handled in azure_rm_common
pass
import re
class AzureRMRedisCacheFacts(AzureRMModuleBase):
"""Utility class to get Azure Cache for Redis facts"""
def __init__(self):
self.module_args = dict(
name=dict(type='str'),
resource_group=dict(
type='str',
required=True
),
return_access_keys=dict(
type='bool',
default=False
),
tags=dict(type='list')
)
self.results = dict(
changed=False,
rediscaches=[]
)
self.name = None
self.resource_group = None
self.profile_name = None
self.tags = None
self._client = None
super(AzureRMRedisCacheFacts, self).__init__(
derived_arg_spec=self.module_args,
supports_tags=False,
facts_module=True
)
def exec_module(self, **kwargs):
for key in self.module_args:
setattr(self, key, kwargs[key])
# get management client
self._client = self.get_mgmt_svc_client(RedisManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2018-03-01')
if self.name:
self.results['rediscaches'] = self.get_item()
else:
self.results['rediscaches'] = self.list_by_resourcegroup()
return self.results
def get_item(self):
"""Get a single Azure Cache for Redis"""
self.log('Get properties for {0}'.format(self.name))
item = None
result = []
try:
item = self._client.redis.get(resource_group_name=self.resource_group, name=self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
result = [self.serialize_rediscache(item)]
return result
def list_by_resourcegroup(self):
"""Get all Azure Cache for Redis within a resource group"""
self.log('List all Azure Cache for Redis within a resource group')
try:
response = self._client.redis.list_by_resource_group(self.resource_group)
except CloudError as exc:
self.fail('Failed to list all items - {0}'.format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_rediscache(item))
return results
def list_keys(self):
"""List Azure Cache for Redis keys"""
self.log('List keys for {0}'.format(self.name))
item = None
try:
item = self._client.redis.list_keys(resource_group_name=self.resource_group, name=self.name)
except CloudError as exc:
self.fail("Failed to list redis keys of {0} - {1}".format(self.name, str(exc)))
return item
def serialize_rediscache(self, rediscache):
'''
Convert an Azure Cache for Redis object to dict.
:param rediscache: Azure Cache for Redis object
:return: dict
'''
new_result = dict(
id=rediscache.id,
resource_group=re.sub('\\/.*', '', re.sub('.*resourceGroups\\/', '', rediscache.id)),
name=rediscache.name,
location=rediscache.location,
provisioning_state=rediscache.provisioning_state,
configuration=rediscache.redis_configuration,
tenant_settings=rediscache.tenant_settings,
shard_count=rediscache.shard_count,
enable_non_ssl_port=rediscache.enable_non_ssl_port,
static_ip=rediscache.static_ip,
subnet=rediscache.subnet_id,
host_name=rediscache.host_name,
tags=rediscache.tags
)
if rediscache.sku:
new_result['sku'] = dict(
name=rediscache.sku.name.lower(),
size=rediscache.sku.family + str(rediscache.sku.capacity)
)
if self.return_access_keys:
access_keys = self.list_keys()
if access_keys:
new_result['access_keys'] = dict(
primary=access_keys.primary_key,
secondary=access_keys.secondary_key
)
return new_result
def main():
"""Main module execution code path"""
AzureRMRedisCacheFacts()
if __name__ == '__main__':
main()
| {
"content_hash": "41ae6ffd4794132fbe09fff2860d2642",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 150,
"avg_line_length": 30.823170731707318,
"alnum_prop": 0.5539070227497527,
"repo_name": "SergeyCherepanov/ansible",
"id": "0c5c32447c4ce643840d3048502ecfb9f8b696f3",
"size": "10280",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/cloud/azure/azure_rm_rediscache_facts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
"""Module to access site configuration in siteconfig.ini."""
from ConfigParser import ConfigParser
from flask import g
FILENAME = '/srv/oclubs/siteconfig.ini'
def _done(commit=True):
if g.get('siteconfigParser', None):
if commit:
if g.get('siteconfigHasWrites', False):
with open(FILENAME, 'w') as configfile:
g.siteconfigParser.write(configfile)
g.siteconfigParser = None
del g.siteconfigParser
g.siteconfigHasWrites = None
del g.siteconfigHasWrites
def _get_parser():
if g.get('siteconfigParser', None):
return g.siteconfigParser
g.siteconfigParser = ConfigParser()
g.siteconfigParser.read(FILENAME)
return g.siteconfigParser
def get_config(name):
"""
Get a site configuration boolean.
:param basestring name: name of site configuration
:returns: value of site configuration
:rtype: bool
"""
return _get_parser().getboolean('siteconfig', name)
def set_config(name, value):
"""
Set a site configuration boolean.
:param basestring name: name of site configuration
:param bool value: new value of site configuration
"""
# ConfigParser stores bool in memory, and getboolean expects string
_get_parser().set('siteconfig', name, str(int(value)))
g.siteconfigHasWrites = True
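# Illustrative usage sketch (the flag name is hypothetical; assumes a Flask
# request context, since the parser is cached on `g`):
#
#     from oclubs.access import siteconfig
#     if siteconfig.get_config('registration_open'):
#         siteconfig.set_config('registration_open', False)
#     siteconfig._done()  # persists the write back to siteconfig.ini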
| {
"content_hash": "3408c472b5c85e6b6e7125585ff81f65",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 71,
"avg_line_length": 26.192307692307693,
"alnum_prop": 0.6696035242290749,
"repo_name": "SHSIDers/oclubs",
"id": "ef3bd2fa1f49bc8b19fb926c981691c79c55a3b3",
"size": "1411",
"binary": false,
"copies": "1",
"ref": "refs/heads/centos7",
"path": "oclubs/access/siteconfig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "54317"
},
{
"name": "HTML",
"bytes": "15923"
},
{
"name": "JavaScript",
"bytes": "14697"
},
{
"name": "Puppet",
"bytes": "12155"
},
{
"name": "Python",
"bytes": "257513"
},
{
"name": "Shell",
"bytes": "8110"
}
],
"symlink_target": ""
} |
import unittest
import math
# Given two squares on a two-dimensional plane, find a line that would cut
# these two squares in half. Assume that the top and the bottom sides of
# the square run parallel to the x-axis.
class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self, p):
if not isinstance(p, Point):
if p == None and self is None:
return True
else:
return False
return self.x == p.x and self.y == p.y
def __ne__(self, p):
return not self.__eq__(p)
def __str__(self):
return "(" + str(self.x) + ", " + str(self.y) + ")"
def moveWithDelta(self, deltaX, deltaY):
self.x = self.x + deltaX
self.y = self.y + deltaY
def lineSegmentWithPoint(self, p):
if isinstance(p, Point):
return LineSegment(self, p)
else:
return None
def lineWithPoint(self, p):
seg = self.lineSegmentWithPoint(p)
if not seg is None:
return Line(seg.f, seg.b, seg.x)
return None
class Line(object):
def __init__(self, f, b, x = None):
# f is None, if line is vertical to x-axis
self.f = f
self.b = b
if f == None or b == None:
assert(not x == None)
self.x = x
def __eq__(self, line):
if not isinstance(line, Line):
if line == None and self is None:
return True
else:
return False
return self.f == line.f and self.b == line.b
def __ne__(self, line):
return not self.__eq__(line)
def intersection(self, line):
        if self.f == line.f and self.b == line.b:
            # Same slope and intercept; for vertical lines (f is None) the
            # descriptor alone is not enough, so compare x as well.
            if self.f is None and self.x != line.x:
                return None
            return self
elif self.f == line.f and not self.b == line.b:
return None
else:
if self.f == None or line.f == None:
if self.f == None:
x = self.x
y = line.f * x + line.b
return Point(x, y)
else:
x = line.x
y = self.f * x + self.b
return Point(x, y)
x = float(self.b - line.b) / float(line.f - self.f)
y = self.f * x + self.b
return Point(x, y)
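    # Worked example (cf. testIntersection below): y = x and y = -x + 5 give
    # x = (b1 - b2) / (f2 - f1) = (0 - 5) / (-1 - 1) = 2.5, so the lines
    # meet at (2.5, 2.5).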
class LineSegment(Line):
def __init__(self, p1, p2):
self.beginPoint = p1
self.endPoint = p2
deltaX = float(p2.x - p1.x)
deltaY = float(p2.y - p1.y)
if deltaX == 0:
super(LineSegment, self).__init__(None, None, p1.x)
else:
f = deltaY / deltaX
b = (p2.x * p1.y - p1.x * p2.y) / deltaX
super(LineSegment, self).__init__(f, b)
def __eq__(self, seg):
if not isinstance(seg, LineSegment):
if seg == None and self is None:
return True
else:
return False
return self.beginPoint == seg.beginPoint and self.endPoint == seg.endPoint and super(LineSegment, self).__eq__(seg)
def __ne__(self, seg):
return not self.__eq__(seg)
def length(self):
deltaX = self.beginPoint.x - self.endPoint.x
deltaY = self.beginPoint.y - self.endPoint.y
return math.sqrt(deltaX ** 2 + deltaY ** 2)
    def _containsPoint(self, p):
        # Bounding-box check that works no matter which direction the
        # segment was defined in (beginPoint/endPoint need not be sorted).
        return (min(self.beginPoint.x, self.endPoint.x) <= p.x <= max(self.beginPoint.x, self.endPoint.x)
                and min(self.beginPoint.y, self.endPoint.y) <= p.y <= max(self.beginPoint.y, self.endPoint.y))

    def intersection(self, segOrLine):
        # A LineSegment is also a Line, so compute the raw intersection first
        # and only then decide which bounds to check; branching on
        # isinstance(segOrLine, Line) first would shadow the segment case.
        intersection = super(LineSegment, self).intersection(segOrLine)
        if intersection == None:
            return None
        elif isinstance(intersection, Line):
            # Collinear overlap: return the shorter segment, or self when
            # intersecting with an infinite line.
            if isinstance(segOrLine, LineSegment):
                return self if self.length() <= segOrLine.length() else segOrLine
            return self
        if not self._containsPoint(intersection):
            return None
        if isinstance(segOrLine, LineSegment) and not segOrLine._containsPoint(intersection):
            return None
        return intersection
class Rectangle(object):
def __init__(self, topLeft, topRight, bottomLeft, bottomRight):
assert(isinstance(topLeft, Point) and isinstance(topRight, Point) and isinstance(bottomLeft, Point) and isinstance(bottomRight, Point))
assert(topLeft.y == topRight.y and bottomLeft.y == bottomRight.y)
assert(topLeft.x == bottomLeft.x and topRight.x == bottomRight.x)
self.topLeft = topLeft
self.topRight = topRight
self.bottomLeft = bottomLeft
self.bottomRight = bottomRight
self.width = topRight.x - topLeft.x
self.height = topLeft.y - bottomLeft.y
self.center = Point(bottomLeft.x + self.width / 2.0, bottomLeft.y + self.height / 2.0)
# def __init__(self, center, width, height):
# assert(isinstance(center, Point))
# assert(width >= 0 and height >= 0)
# self.center = center
# self.width = width
# self.height = height
# self.topLeft = Point(center.x - width / 2.0, center.y + height / 2.0)
# self.topRight = Point(center.x + width / 2.0, center.y + height / 2.0)
# self.bottomLeft = Point(center.x - width / 2.0, center.y - height / 2.0)
# self.bottomRight = Point(center.x + width / 2.0, center.y - height / 2.0)
class Square(Rectangle):
def __init__(self, topLeft, topRight, bottomLeft, bottomRight):
super(Square, self).__init__(topLeft, topRight, bottomLeft, bottomRight)
assert(self.width == self.height)
self.sideLength = self.width
# def __init__(self, center, width, height):
# super(Square, self).__init__(center, width, height)
# assert(width == height)
# self.sideLength = width
# def __init__(self, center, sideLength):
# self.__init__(center, sideLength, sideLength)
class TestPoint(unittest.TestCase):
def setUp(self):
# print "In method", self._testMethodName
pass
def testInit(self):
p = Point(2, 3)
self.assertEqual(p.x, 2)
self.assertEqual(p.y, 3)
def testEqual(self):
p = Point(3, 4)
self.assertTrue(p == Point(3, 4))
def testNotEqual(self):
p = Point(3, 4)
self.assertTrue(not p == Point(3, 3))
def testMoveWithDelta(self):
p = Point(3, 5)
p.moveWithDelta(4, 9)
self.assertEqual(p, Point(7, 14))
class TestLine(unittest.TestCase):
def testInit(self):
line = Line(1, 3)
self.assertEqual(line.f, 1)
self.assertEqual(line.b, 3)
def testIntersection(self):
line1 = Line(1, 3)
line2 = Line(1, 3)
self.assertEqual(line1.intersection(line2), line1)
line3 = Line(1, 4)
self.assertIsNone(line1.intersection(line3))
line4 = Line(0, 3)
self.assertEqual(line1.intersection(line4), Point(0, 3))
line5 = Line(None, None, 1)
self.assertEqual(line1.intersection(line5), Point(1, 4))
line6 = Line(1, 0)
line7 = Line(-1, 5)
self.assertEqual(line6.intersection(line7), Point(2.5, 2.5))
class TestLineSegment(unittest.TestCase):
def testInit(self):
seg = LineSegment(Point(0, 0), Point(5, 5))
self.assertEqual(seg.f, 1)
self.assertEqual(seg.b, 0)
self.assertEqual(seg.beginPoint, Point(0, 0))
self.assertEqual(seg.endPoint, Point(5, 5))
def testLength(self):
seg = LineSegment(Point(1, 1), Point(5, 5))
self.assertEqual(seg.length(), 4 * math.sqrt(2))
def testIntersection(self):
seg1 = LineSegment(Point(1, 1), Point(5, 5))
seg2 = LineSegment(Point(0, 5), Point(5, 0))
self.assertEqual(seg1.intersection(seg2), Point(2.5, 2.5))
seg3 = Line(1, 0)
self.assertEqual(seg1.intersection(seg3), seg1)
seg4 = Line(2, 0)
self.assertIsNone(seg1.intersection(seg4))
seg5 = Line(None, None, 1)
self.assertEqual(seg1.intersection(seg5), Point(1, 1))
def unitTest():
unittest.main()
# unitTest()
# Solution
def cutInHalfLine(s1, s2):
line = s1.center.lineWithPoint(s2.center)
return line
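# Why this works: any line through a square's center splits it into two
# pieces of equal area, so the single line through both centers halves both
# squares at once. Edge case: if the centers coincide, every line through
# that shared point works; lineWithPoint then degenerates to a vertical line
# through the point, which is one valid answer among many.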
def test():
s1 = Square(Point(1, 5), Point(5, 5), Point(1, 1), Point(5, 1))
s2 = Square(Point(2, 9), Point(4, 9), Point(2, 7), Point(4, 7))
print "Passed" if cutInHalfLine(s1, s2) == Line(None, None, 3) else "Failed"
s3 = Square(Point(-1, 1), Point(1, 1), Point(-1, -1), Point(1, -1))
print "Passed" if cutInHalfLine(s1, s3) == Line(1, 0) else "Failed"
test()
| {
"content_hash": "541995f0a663ca5ba7ccddb91763d19d",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 336,
"avg_line_length": 30.228682170542637,
"alnum_prop": 0.6617515066034106,
"repo_name": "honghaoz/CrackingTheCodingInterview",
"id": "f6825e0f0d34e2e20f533ddf3d95f2617ad08d62",
"size": "7799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Cracking the Coding Interview/Chapter 7_Mathematics and Probability/7.5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95176"
},
{
"name": "Swift",
"bytes": "318658"
}
],
"symlink_target": ""
} |
import zmq
import json
import random
import time
import sys
import datetime
#client side
def push_messages(workernum):
""" generates rngs and pushes """
context = zmq.Context()
socket = context.socket(zmq.PUB)
name = "worker%i" % workernum
print "Starting %s." % name
socket.setsockopt(zmq.IDENTITY, name)
socket.connect('tcp://127.0.0.1:5000')
#socket.bind('tcp://127.0.0.1:5000')
while True:
try:
msg = json.dumps({'worker': name, 'feed': random.random(),
'timestamp':str(datetime.datetime.now())})
print "Worker %s - sending %s" % (name, msg)
socket.send_multipart(['data.fundamental.umm', msg])
except KeyboardInterrupt:
print "Stopping %s..." % name
break
time.sleep(random.random()/100.0)
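# Note: the two-frame send above ([topic, payload]) lets subscribers filter
# by topic prefix, e.g. sub.setsockopt(zmq.SUBSCRIBE, 'data.fundamental');
# ZeroMQ PUB/SUB does prefix matching on the first frame.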
if __name__ == "__main__":
if len(sys.argv)<2:
workernum = random.randint(1, 100)
else:
workernum = int(sys.argv[1])
push_messages(workernum)
| {
"content_hash": "18fba43324daaed85a64ab76706674c1",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 72,
"avg_line_length": 27.7027027027027,
"alnum_prop": 0.5785365853658536,
"repo_name": "bjorskog/majordomo",
"id": "8d415389513f8953081954ee0198bf25d516edf5",
"size": "1072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "majordomo/tests/test_worker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "23259"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
} |
"""Auto-generated file, do not edit by hand. TC metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_TC = PhoneMetadata(id='TC', country_code=1, international_prefix='011',
general_desc=PhoneNumberDesc(national_number_pattern='[5689]\\d{9}', possible_number_pattern='\\d{7}(?:\\d{3})?'),
fixed_line=PhoneNumberDesc(national_number_pattern='649(?:712|9(?:4\\d|50))\\d{4}', possible_number_pattern='\\d{7}(?:\\d{3})?', example_number='6497121234'),
mobile=PhoneNumberDesc(national_number_pattern='649(?:2(?:3[129]|4[1-7])|3(?:3[1-389]|4[1-7])|4[34][12])\\d{4}', possible_number_pattern='\\d{10}', example_number='6492311234'),
toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|55|66|77|88)[2-9]\\d{6}', possible_number_pattern='\\d{10}', example_number='8002345678'),
premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\d{6}', possible_number_pattern='\\d{10}', example_number='9002345678'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='5(?:00|33|44)[2-9]\\d{6}', possible_number_pattern='\\d{10}', example_number='5002345678'),
voip=PhoneNumberDesc(national_number_pattern='64971[01]\\d{4}', possible_number_pattern='\\d{10}', example_number='6497101234'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='9(?:11|99)', possible_number_pattern='\\d{3}', example_number='911'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
short_code=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='1',
national_prefix_for_parsing='1',
leading_digits='649')
| {
"content_hash": "555408e19fb5390c491d262d70b540b8",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 181,
"avg_line_length": 96.68181818181819,
"alnum_prop": 0.7169722614010343,
"repo_name": "ayushgoel/FixGoogleContacts",
"id": "8c8ec06d469ed9809ad917dbf429124c9612e345",
"size": "2127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "phonenumbers/data/region_TC.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4319489"
}
],
"symlink_target": ""
} |
from django.db import models
from django_yubin import constants
try:
from django.utils.timezone import now
except ImportError:
import datetime
now = datetime.datetime.now
class QueueMethods(object):
"""
A mixin which provides extra methods to a QuerySet/Manager subclass.
"""
def exclude_future(self):
"""
Exclude future time-delayed messages.
"""
return self.exclude(date_queued__gt=now)
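    # Note: `now` is passed as a callable rather than called here; older
    # Django evaluates callables used as query values when the query runs,
    # so each queryset compares against the current time, not import time.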
def high_priority(self):
"""
Return a QuerySet of high priority queued messages.
"""
return self.filter(priority=constants.PRIORITY_HIGH)
def normal_priority(self):
"""
Return a QuerySet of normal priority queued messages.
"""
return self.filter(priority=constants.PRIORITY_NORMAL)
def low_priority(self):
"""
Return a QuerySet of low priority queued messages.
"""
return self.filter(priority=constants.PRIORITY_LOW)
def non_deferred(self):
"""
Return a QuerySet containing all non-deferred queued messages,
excluding "future" messages.
"""
return self.exclude_future().filter(deferred=None)
def deferred(self):
"""
Return a QuerySet of all deferred messages in the queue, excluding
"future" messages.
"""
return self.exclude_future().exclude(deferred=None)
class QueueQuerySet(QueueMethods, models.query.QuerySet):
pass
class QueueManager(QueueMethods, models.Manager):
use_for_related_fields = True
def get_queryset(self):
return QueueQuerySet(self.model, using=self._db)
def retry_deferred(self, max_retries=None, new_priority=None):
"""
Reset the deferred flag for all deferred messages so they will be
retried.
If ``max_retries`` is set, deferred messages which have been retried
more than this many times will *not* have their deferred flag reset.
If ``new_priority`` is ``None`` (default), deferred messages retain
their original priority level. Otherwise all reset deferred messages
will be set to this priority level.
"""
queryset = self.deferred()
if max_retries:
queryset = queryset.filter(retries__lte=max_retries)
count = queryset.count()
update_kwargs = dict(deferred=None, retries=models.F('retries')+1)
if new_priority is not None:
update_kwargs['priority'] = new_priority
queryset.update(**update_kwargs)
return count
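# Illustrative usage sketch (model name assumed from django_yubin.models):
# requeue deferred messages that failed fewer than three times, bumping
# them to high priority.
#
#     from django_yubin.models import QueuedMessage
#     QueuedMessage.objects.retry_deferred(
#         max_retries=3, new_priority=constants.PRIORITY_HIGH)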
| {
"content_hash": "8b23543b1b4a440f2e6259198abec9e2",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 76,
"avg_line_length": 27.752688172043012,
"alnum_prop": 0.6392870980240217,
"repo_name": "sergei-maertens/django-yubin",
"id": "abd38f307aa36997cfd1aace3ab69101e41e02a5",
"size": "2701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_yubin/managers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4922"
},
{
"name": "Python",
"bytes": "109132"
},
{
"name": "Shell",
"bytes": "1382"
}
],
"symlink_target": ""
} |
import unittest
import os
import sys
import shlex, subprocess
import urllib, commands, time, getpass, re, json
import oeqa.utils.ftools as ftools
from oeqa.selftest.base import oeSelfTest
from oeqa.utils.commands import runCmd
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../', 'bitbake/lib/toaster')))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "toastermain.settings")
import toastermain.settings
from django.db.models import Q
from orm.models import *
from oeqa.utils.decorators import testcase
class ToasterSetup(oeSelfTest):
def recipe_parse(self, file_path, var):
for line in open(file_path,'r'):
if line.find(var) > -1:
val = line.split(" = ")[1].replace("\"", "").strip()
return val
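    # Example: for a recipe file containing the line 'SECTION = "base"',
    # recipe_parse(file_path, "SECTION = ") returns 'base'.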
def fix_file_path(self, file_path):
if ":" in file_path:
file_path=file_path.split(":")[2]
return file_path
class Toaster_DB_Tests(ToasterSetup):
# Check if build name is unique - tc_id=795
@testcase(795)
def test_Build_Unique_Name(self):
all_builds = Build.objects.all().count()
distinct_builds = Build.objects.values('id').distinct().count()
self.assertEqual(distinct_builds, all_builds, msg = 'Build name is not unique')
    # Check if build cooker log path is unique - tc_id=819
@testcase(819)
def test_Build_Unique_Cooker_Log_Path(self):
distinct_path = Build.objects.values('cooker_log_path').distinct().count()
total_builds = Build.objects.values('id').count()
        self.assertEqual(distinct_path, total_builds, msg = 'Build cooker log path is not unique')
# Check if the number of errors matches the number of orm_logmessage.level entries with value 2 - tc_id=820
@testcase(820)
def test_Build_Errors_No(self):
builds = Build.objects.values('id', 'errors_no')
cnt_err = []
for build in builds:
log_mess_err_no = LogMessage.objects.filter(build = build['id'], level = 2).count()
if (build['errors_no'] != log_mess_err_no):
cnt_err.append(build['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for build id: %s' % cnt_err)
# Check if the number of warnings matches the number of orm_logmessage.level entries with value 1 - tc=821
@testcase(821)
def test_Build_Warnings_No(self):
builds = Build.objects.values('id', 'warnings_no')
cnt_err = []
for build in builds:
log_mess_warn_no = LogMessage.objects.filter(build = build['id'], level = 1).count()
if (build['warnings_no'] != log_mess_warn_no):
cnt_err.append(build['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for build id: %s' % cnt_err)
# Check if the build succeeded then the errors_no is 0 - tc_id=822
@testcase(822)
def test_Build_Suceeded_Errors_No(self):
builds = Build.objects.filter(outcome = 0).values('id', 'errors_no')
cnt_err = []
for build in builds:
if (build['errors_no'] != 0):
cnt_err.append(build['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for build id: %s' % cnt_err)
# Check if task order is unique for one build - tc=824
@testcase(824)
def test_Task_Unique_Order(self):
builds = Build.objects.values('id')
cnt_err = []
for build in builds:
total_task_order = Task.objects.filter(build = build['id']).values('order').count()
distinct_task_order = Task.objects.filter(build = build['id']).values('order').distinct().count()
if (total_task_order != distinct_task_order):
cnt_err.append(build['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for build id: %s' % cnt_err)
# Check task order sequence for one build - tc=825
@testcase(825)
def test_Task_Order_Sequence(self):
        builds = Build.objects.values('id')
cnt_err = []
for build in builds:
tasks = Task.objects.filter(Q(build = build['id']), ~Q(order = None), ~Q(task_name__contains = '_setscene')).values('id', 'order').order_by("order")
cnt_tasks = 0
for task in tasks:
cnt_tasks += 1
if (task['order'] != cnt_tasks):
cnt_err.append(task['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
# Check if disk_io matches the difference between EndTimeIO and StartTimeIO in build stats - tc=828
### this needs to be updated ###
#def test_Task_Disk_IO_TC828(self):
# Check if outcome = 2 (SSTATE) then sstate_result must be 3 (RESTORED) - tc=832
@testcase(832)
def test_Task_If_Outcome_2_Sstate_Result_Must_Be_3(self):
tasks = Task.objects.filter(outcome = 2).values('id', 'sstate_result')
cnt_err = []
for task in tasks:
            if (task['sstate_result'] != 3):
cnt_err.append(task['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
# Check if outcome = 1 (COVERED) or 3 (EXISTING) then sstate_result must be 0 (SSTATE_NA) - tc=833
@testcase(833)
def test_Task_If_Outcome_1_3_Sstate_Result_Must_Be_0(self):
tasks = Task.objects.filter(outcome__in = (1, 3)).values('id', 'sstate_result')
cnt_err = []
for task in tasks:
if (task['sstate_result'] != 0):
cnt_err.append(task['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
# Check if outcome is 0 (SUCCESS) or 4 (FAILED) then sstate_result must be 0 (NA), 1 (MISS) or 2 (FAILED) - tc=834
@testcase(834)
def test_Task_If_Outcome_0_4_Sstate_Result_Must_Be_0_1_2(self):
tasks = Task.objects.filter(outcome__in = (0, 4)).values('id', 'sstate_result')
cnt_err = []
for task in tasks:
if (task['sstate_result'] not in [0, 1, 2]):
cnt_err.append(task['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
# Check if task_executed = TRUE (1), script_type must be 0 (CODING_NA), 2 (CODING_PYTHON), 3 (CODING_SHELL) - tc=891
@testcase(891)
def test_Task_If_Task_Executed_True_Script_Type_0_2_3(self):
tasks = Task.objects.filter(task_executed = 1).values('id', 'script_type')
cnt_err = []
for task in tasks:
if (task['script_type'] not in [0, 2, 3]):
cnt_err.append(task['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
# Check if task_executed = TRUE (1), outcome must be 0 (SUCCESS) or 4 (FAILED) - tc=836
@testcase(836)
def test_Task_If_Task_Executed_True_Outcome_0_4(self):
tasks = Task.objects.filter(task_executed = 1).values('id', 'outcome')
cnt_err = []
for task in tasks:
if (task['outcome'] not in [0, 4]):
cnt_err.append(task['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
# Check if task_executed = FALSE (0), script_type must be 0 - tc=890
@testcase(890)
def test_Task_If_Task_Executed_False_Script_Type_0(self):
tasks = Task.objects.filter(task_executed = 0).values('id', 'script_type')
cnt_err = []
for task in tasks:
if (task['script_type'] != 0):
cnt_err.append(task['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
# Check if task_executed = FALSE (0) and build outcome = SUCCEEDED (0), task outcome must be 1 (COVERED), 2 (CACHED), 3 (PREBUILT), 5 (EMPTY) - tc=837
@testcase(837)
def test_Task_If_Task_Executed_False_Outcome_1_2_3_5(self):
builds = Build.objects.filter(outcome = 0).values('id')
cnt_err = []
for build in builds:
tasks = Task.objects.filter(build = build['id'], task_executed = 0).values('id', 'outcome')
for task in tasks:
if (task['outcome'] not in [1, 2, 3, 5]):
cnt_err.append(task['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
# Key verification - tc=888
@testcase(888)
def test_Target_Installed_Package(self):
rows = Target_Installed_Package.objects.values('id', 'target_id', 'package_id')
cnt_err = []
for row in rows:
target = Target.objects.filter(id = row['target_id']).values('id')
package = Package.objects.filter(id = row['package_id']).values('id')
if (not target or not package):
cnt_err.append(row['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for target installed package id: %s' % cnt_err)
# Key verification - tc=889
@testcase(889)
def test_Task_Dependency(self):
rows = Task_Dependency.objects.values('id', 'task_id', 'depends_on_id')
cnt_err = []
for row in rows:
task_id = Task.objects.filter(id = row['task_id']).values('id')
depends_on_id = Task.objects.filter(id = row['depends_on_id']).values('id')
if (not task_id or not depends_on_id):
cnt_err.append(row['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for task dependency id: %s' % cnt_err)
    # Check that target image file_name is populated (only when is_image=True and the build succeeded), that the file exists, and that its size matches the file_size value
### Need to add the tc in the test run
@testcase(1037)
def test_Target_File_Name_Populated(self):
builds = Build.objects.filter(outcome = 0).values('id')
for build in builds:
targets = Target.objects.filter(build_id = build['id'], is_image = 1).values('id')
for target in targets:
target_files = Target_Image_File.objects.filter(target_id = target['id']).values('id', 'file_name', 'file_size')
cnt_err = []
for file_info in target_files:
target_id = file_info['id']
target_file_name = file_info['file_name']
target_file_size = file_info['file_size']
if (not target_file_name or not target_file_size):
cnt_err.append(target_id)
else:
if (not os.path.exists(target_file_name)):
cnt_err.append(target_id)
else:
if (os.path.getsize(target_file_name) != target_file_size):
cnt_err.append(target_id)
self.assertEqual(len(cnt_err), 0, msg = 'Errors for target image file id: %s' % cnt_err)
# Key verification - tc=884
@testcase(884)
def test_Package_Dependency(self):
cnt_err = []
deps = Package_Dependency.objects.values('id', 'package_id', 'depends_on_id')
for dep in deps:
if (dep['package_id'] == dep['depends_on_id']):
cnt_err.append(dep['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for package dependency id: %s' % cnt_err)
# Check if recipe name does not start with a number (0-9) - tc=838
@testcase(838)
def test_Recipe_Name(self):
recipes = Recipe.objects.values('id', 'name')
cnt_err = []
for recipe in recipes:
if (recipe['name'][0].isdigit() is True):
cnt_err.append(recipe['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe id: %s' % cnt_err)
# Check if recipe section matches the content of the SECTION variable (if set) in file_path - tc=839
@testcase(839)
def test_Recipe_DB_Section_Match_Recipe_File_Section(self):
recipes = Recipe.objects.values('id', 'section', 'file_path')
cnt_err = []
for recipe in recipes:
file_path = self.fix_file_path(recipe['file_path'])
file_exists = os.path.isfile(file_path)
if (not file_path or (file_exists is False)):
cnt_err.append(recipe['id'])
else:
file_section = self.recipe_parse(file_path, "SECTION = ")
db_section = recipe['section']
if file_section:
if (db_section != file_section):
cnt_err.append(recipe['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe id: %s' % cnt_err)
# Check if recipe license matches the content of the LICENSE variable (if set) in file_path - tc=840
@testcase(840)
def test_Recipe_DB_License_Match_Recipe_File_License(self):
recipes = Recipe.objects.values('id', 'license', 'file_path')
cnt_err = []
for recipe in recipes:
file_path = self.fix_file_path(recipe['file_path'])
file_exists = os.path.isfile(file_path)
if (not file_path or (file_exists is False)):
cnt_err.append(recipe['id'])
else:
file_license = self.recipe_parse(file_path, "LICENSE = ")
db_license = recipe['license']
if file_license:
if (db_license != file_license):
cnt_err.append(recipe['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe id: %s' % cnt_err)
# Check if recipe homepage matches the content of the HOMEPAGE variable (if set) in file_path - tc=841
@testcase(841)
def test_Recipe_DB_Homepage_Match_Recipe_File_Homepage(self):
recipes = Recipe.objects.values('id', 'homepage', 'file_path')
cnt_err = []
for recipe in recipes:
file_path = self.fix_file_path(recipe['file_path'])
file_exists = os.path.isfile(file_path)
if (not file_path or (file_exists is False)):
cnt_err.append(recipe['id'])
else:
file_homepage = self.recipe_parse(file_path, "HOMEPAGE = ")
db_homepage = recipe['homepage']
if file_homepage:
if (db_homepage != file_homepage):
cnt_err.append(recipe['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe id: %s' % cnt_err)
# Check if recipe bugtracker matches the content of the BUGTRACKER variable (if set) in file_path - tc=842
@testcase(842)
def test_Recipe_DB_Bugtracker_Match_Recipe_File_Bugtracker(self):
recipes = Recipe.objects.values('id', 'bugtracker', 'file_path')
cnt_err = []
for recipe in recipes:
file_path = self.fix_file_path(recipe['file_path'])
file_exists = os.path.isfile(file_path)
if (not file_path or (file_exists is False)):
cnt_err.append(recipe['id'])
else:
file_bugtracker = self.recipe_parse(file_path, "BUGTRACKER = ")
db_bugtracker = recipe['bugtracker']
if file_bugtracker:
if (db_bugtracker != file_bugtracker):
cnt_err.append(recipe['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe id: %s' % cnt_err)
# Recipe key verification, recipe name does not depends on a recipe having the same name - tc=883
@testcase(883)
def test_Recipe_Dependency(self):
deps = Recipe_Dependency.objects.values('id', 'recipe_id', 'depends_on_id')
cnt_err = []
for dep in deps:
if (not dep['recipe_id'] or not dep['depends_on_id']):
cnt_err.append(dep['id'])
else:
                # fetch the rows first: comparing the QuerySets themselves compares identity, not content
                name = Recipe.objects.filter(id = dep['recipe_id']).values('name').first()
                dep_name = Recipe.objects.filter(id = dep['depends_on_id']).values('name').first()
                if (name == dep_name):
cnt_err.append(dep['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe dependency id: %s' % cnt_err)
# Check if package name does not start with a number (0-9) - tc=846
@testcase(846)
def test_Package_Name_For_Number(self):
packages = Package.objects.filter(~Q(size = -1)).values('id', 'name')
cnt_err = []
for package in packages:
if (package['name'][0].isdigit() is True):
cnt_err.append(package['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
# Check if package version starts with a number (0-9) - tc=847
@testcase(847)
def test_Package_Version_Starts_With_Number(self):
packages = Package.objects.filter(~Q(size = -1)).values('id', 'version')
cnt_err = []
for package in packages:
if (package['version'][0].isdigit() is False):
cnt_err.append(package['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
# Check if package revision starts with 'r' - tc=848
@testcase(848)
def test_Package_Revision_Starts_With_r(self):
packages = Package.objects.filter(~Q(size = -1)).values('id', 'revision')
cnt_err = []
for package in packages:
            if (package['revision'].startswith("r") is False):
cnt_err.append(package['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
# Check the validity of the package build_id
### TC must be added in test run
@testcase(1038)
def test_Package_Build_Id(self):
packages = Package.objects.filter(~Q(size = -1)).values('id', 'build_id')
cnt_err = []
for package in packages:
build_id = Build.objects.filter(id = package['build_id']).values('id')
if (not build_id):
cnt_err.append(package['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
# Check the validity of package recipe_id
### TC must be added in test run
@testcase(1039)
def test_Package_Recipe_Id(self):
packages = Package.objects.filter(~Q(size = -1)).values('id', 'recipe_id')
cnt_err = []
for package in packages:
recipe_id = Recipe.objects.filter(id = package['recipe_id']).values('id')
if (not recipe_id):
cnt_err.append(package['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
# Check if package installed_size field is not null
    ### TC must be added in test run
@testcase(1040)
def test_Package_Installed_Size_Not_NULL(self):
packages = Package.objects.filter(installed_size__isnull = True).values('id')
cnt_err = []
for package in packages:
cnt_err.append(package['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
# Check if all layers requests return exit code is 200 - tc=843
@testcase(843)
def test_Layers_Requests_Exit_Code(self):
layers = Layer.objects.values('id', 'layer_index_url')
cnt_err = []
for layer in layers:
resp = urllib.urlopen(layer['layer_index_url'])
if (resp.getcode() != 200):
cnt_err.append(layer['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for layer id: %s' % cnt_err)
# Check if the output of bitbake-layers show_layers matches the info from database - tc=895
@testcase(895)
def test_Layers_Show_Layers(self):
layers = Layer.objects.values('id', 'name', 'local_path')
cmd = commands.getoutput('bitbake-layers show_layers')
cnt_err = []
for layer in layers:
            if (layer['name'] not in cmd) or (layer['local_path'] not in cmd):
cnt_err.append(layer['id'])
self.assertEqual(len(cnt_err), 0, msg = 'Errors for layer id: %s' % cnt_err)
# Check if django server starts regardless of the timezone set on the machine - tc=905
@testcase(905)
def test_Start_Django_Timezone(self):
current_path = os.getcwd()
zonefilelist = []
ZONEINFOPATH = '/usr/share/zoneinfo/'
os.chdir("../bitbake/lib/toaster/")
        cnt_err = []
for filename in os.listdir(ZONEINFOPATH):
if os.path.isfile(os.path.join(ZONEINFOPATH, filename)):
zonefilelist.append(filename)
for k in range(len(zonefilelist)):
if k <= 5:
files = zonefilelist[k]
os.system("export TZ="+str(files)+"; python manage.py runserver > /dev/null 2>&1 &")
time.sleep(3)
pid = subprocess.check_output("ps aux | grep '[/u]sr/bin/python manage.py runserver' | awk '{print $2}'", shell = True)
if pid:
os.system("kill -9 "+str(pid))
else:
cnt_err.append(zonefilelist[k])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors: django server does not start with timezone: %s' % cnt_err)
os.chdir(current_path)
| {
"content_hash": "37d3ed497b7f6855d0eaf0dcaee9f4a3",
"timestamp": "",
"source": "github",
"line_count": 445,
"max_line_length": 161,
"avg_line_length": 41.63370786516854,
"alnum_prop": 0.6594699627570573,
"repo_name": "wwright2/dcim3-angstrom1",
"id": "1cf28a0144960e54b17727b59a9178f810f5fe57",
"size": "18527",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sources/openembedded-core/meta/lib/oeqa/selftest/_toaster.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "73541"
},
{
"name": "Awk",
"bytes": "286"
},
{
"name": "Batchfile",
"bytes": "19960"
},
{
"name": "BitBake",
"bytes": "2875212"
},
{
"name": "BlitzBasic",
"bytes": "6367"
},
{
"name": "C",
"bytes": "1598095"
},
{
"name": "C++",
"bytes": "2198121"
},
{
"name": "CMake",
"bytes": "7277"
},
{
"name": "CSS",
"bytes": "28636"
},
{
"name": "Groff",
"bytes": "502999"
},
{
"name": "HTML",
"bytes": "210823"
},
{
"name": "JavaScript",
"bytes": "23100"
},
{
"name": "Lua",
"bytes": "1194"
},
{
"name": "Makefile",
"bytes": "32539"
},
{
"name": "Nginx",
"bytes": "2744"
},
{
"name": "PHP",
"bytes": "829048"
},
{
"name": "Pascal",
"bytes": "17352"
},
{
"name": "Perl",
"bytes": "66339"
},
{
"name": "Python",
"bytes": "3672452"
},
{
"name": "QMake",
"bytes": "165"
},
{
"name": "Ruby",
"bytes": "10695"
},
{
"name": "Shell",
"bytes": "820076"
},
{
"name": "SourcePawn",
"bytes": "259600"
},
{
"name": "Tcl",
"bytes": "4897"
},
{
"name": "VimL",
"bytes": "8483"
},
{
"name": "XSLT",
"bytes": "9089"
}
],
"symlink_target": ""
} |
# Assumes an IPython startup file: register_line_magic must be imported and
# _ip must be the running shell (it may already be set by an earlier startup file).
from IPython.core.magic import register_line_magic
_ip = get_ipython()
@register_line_magic
def lifelines(line):
s = ("from pandas import DataFrame, Series\n"
"import lifelines\n"
"from lifelines import KaplanMeierFitter, NelsonAalenFitter, AalenAdditiveFitter, CoxPHFitter"
)
_ip.run_code(s)
# %stats
@register_line_magic
def stats(line):
_ip.run_code("import scipy.stats as stats")
_ip.run_line_magic("lifelines", line)
del stats, lifelines
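# Usage sketch (hypothetical session): at the IPython prompt, `%lifelines`
# imports pandas and the lifelines fitters into the namespace; `%stats`
# additionally imports scipy.stats before delegating to %lifelines.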
| {
"content_hash": "1c19321fe9a4cadcc7ba4eb37c5628ab",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 102,
"avg_line_length": 25.9375,
"alnum_prop": 0.6867469879518072,
"repo_name": "CamDavidsonPilon/StartupFiles",
"id": "d5d01593732e7b4438fe6d3fc5975f76aca280d1",
"size": "445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "startup/05-data_analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8436"
}
],
"symlink_target": ""
} |
"""Service descriptor."""
from cached_property import cached_property
from docbuilder.descriptors.base import Descriptor
from docbuilder.descriptors.model import Model
class Service(Descriptor):
"""Service descriptor."""
@classmethod
def collect(cls):
"""Collects all application services."""
from ggrc.services import all_services
return all_services()
@cached_property
def name(self):
"""Service name."""
return '%s -> %s' % (self.model.name, self.obj.name)
@cached_property
def url(self):
"""Endpoint URL."""
return '/api/%s' % self.obj.name
@cached_property
def doc(self):
"""Doc-stirng of wrapped model class."""
return self.model.doc
@cached_property
def model(self):
"""Descriptor of wrapped model class."""
return Model(self.obj.model_class)
@cached_property
def readonly(self):
"""Is service read-only?"""
return self.obj.service_class.__name__.startswith('ReadOnly')
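# Usage sketch (hypothetical; assumes the Descriptor base stores its
# constructor argument as `self.obj`):
#   services = [Service(svc) for svc in Service.collect()]
#   for svc in services:
#       print(svc.name, svc.url, svc.readonly)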
| {
"content_hash": "dd61f132ec34226bf388302e9f94af72",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 65,
"avg_line_length": 23,
"alnum_prop": 0.6770186335403726,
"repo_name": "josthkko/ggrc-core",
"id": "c7b0aaf43f354242618aa1e96458bb7253d56f05",
"size": "1079",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "src/docbuilder/descriptors/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "163629"
},
{
"name": "Cucumber",
"bytes": "136321"
},
{
"name": "HTML",
"bytes": "1057288"
},
{
"name": "JavaScript",
"bytes": "1492054"
},
{
"name": "Makefile",
"bytes": "6161"
},
{
"name": "Mako",
"bytes": "2178"
},
{
"name": "Python",
"bytes": "2148568"
},
{
"name": "Shell",
"bytes": "29929"
}
],
"symlink_target": ""
} |
from django.db.models import signals
DUMMY_BMP_DATA = b'BM:\x00\x00\x00\x00\x00\x00\x006\x00\x00\x00(\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01\x00\x18\x00\x00\x00\x00\x00\x04\x00\x00\x00\x13\x0b\x00\x00\x13\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
def signals_switch():
pre_save = signals.pre_save.receivers
post_save = signals.post_save.receivers
def disconnect():
signals.pre_save.receivers = []
signals.post_save.receivers = []
def reconnect():
signals.pre_save.receivers = pre_save
signals.post_save.receivers = post_save
return disconnect, reconnect
disconnect_signals, reconnect_signals = signals_switch()
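# Usage sketch (hypothetical): wrap bulk fixture creation so that pre/post-save
# handlers are not fired for every object created:
#   disconnect_signals()
#   try:
#       create_many_test_objects()
#   finally:
#       reconnect_signals()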
def _helper_test_http_method_responses(client, method, url, data, users, after_each_request=None,
content_type="application/json"):
results = []
for user in users:
if user is None:
client.logout()
else:
client.login(user)
if data:
response = getattr(client, method)(url, data, content_type=content_type)
else:
response = getattr(client, method)(url)
#if response.status_code >= 400:
# print("Response content:", response.content)
results.append(response)
if after_each_request is not None:
after_each_request()
return results
def helper_test_http_method(client, method, url, data, users, after_each_request=None,
content_type="application/json"):
responses = _helper_test_http_method_responses(client, method, url, data, users, after_each_request,
content_type=content_type)
return list(map(lambda r: r.status_code, responses))
def helper_test_http_method_and_count(client, method, url, data, users, after_each_request=None):
responses = _helper_test_http_method_responses(client, method, url, data, users, after_each_request)
return list(map(lambda r: (r.status_code, len(r.data) if isinstance(r.data, list) and 200 <= r.status_code < 300 else 0), responses))
def helper_test_http_method_and_keys(client, method, url, data, users, after_each_request=None):
responses = _helper_test_http_method_responses(client, method, url, data, users, after_each_request)
return list(map(lambda r: (r.status_code, set(r.data.keys() if isinstance(r.data, dict) and 200 <= r.status_code < 300 else [])), responses))
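# Usage sketch (hypothetical names): assert that one endpoint behaves
# differently per role:
#   users = [None, registered_user, project_owner]
#   codes = helper_test_http_method(client, 'get', '/api/v1/projects/1', None, users)
#   assert codes == [401, 403, 200]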
| {
"content_hash": "454f139a24e3e4139317cfd0a78e6e9b",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 237,
"avg_line_length": 40.85245901639344,
"alnum_prop": 0.6480738362760835,
"repo_name": "mattcongy/itshop",
"id": "2a8a4855aafa04b0cd8d59933e221024ffadddbf",
"size": "3488",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docker-images/taigav2/taiga-back/tests/utils.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103474"
},
{
"name": "CoffeeScript",
"bytes": "3380"
},
{
"name": "HTML",
"bytes": "274547"
},
{
"name": "JavaScript",
"bytes": "203660"
},
{
"name": "Nginx",
"bytes": "1286"
},
{
"name": "Python",
"bytes": "3591150"
},
{
"name": "Ruby",
"bytes": "164978"
},
{
"name": "Shell",
"bytes": "5238"
}
],
"symlink_target": ""
} |
import numpy as np
import ray.experimental.array.remote as ra
import ray
BLOCK_SIZE = 10
class DistArray:
def __init__(self, shape, object_refs=None):
self.shape = shape
self.ndim = len(shape)
self.num_blocks = [
int(np.ceil(1.0 * a / BLOCK_SIZE)) for a in self.shape
]
if object_refs is not None:
self.object_refs = object_refs
else:
self.object_refs = np.empty(self.num_blocks, dtype=object)
if self.num_blocks != list(self.object_refs.shape):
raise Exception(
"The fields `num_blocks` and `object_refs` are "
"inconsistent, `num_blocks` is {} and `object_refs` "
"has shape {}".format(self.num_blocks,
list(self.object_refs.shape)))
@staticmethod
def compute_block_lower(index, shape):
if len(index) != len(shape):
raise Exception("The fields `index` and `shape` must have the "
"same length, but `index` is {} and `shape` is "
"{}.".format(index, shape))
return [elem * BLOCK_SIZE for elem in index]
@staticmethod
def compute_block_upper(index, shape):
if len(index) != len(shape):
raise Exception("The fields `index` and `shape` must have the "
"same length, but `index` is {} and `shape` is "
"{}.".format(index, shape))
upper = []
for i in range(len(shape)):
upper.append(min((index[i] + 1) * BLOCK_SIZE, shape[i]))
return upper
@staticmethod
def compute_block_shape(index, shape):
lower = DistArray.compute_block_lower(index, shape)
upper = DistArray.compute_block_upper(index, shape)
return [u - l for (l, u) in zip(lower, upper)]
@staticmethod
def compute_num_blocks(shape):
return [int(np.ceil(1.0 * a / BLOCK_SIZE)) for a in shape]
def assemble(self):
"""Assemble an array from a distributed array of object refs."""
first_block = ray.get(self.object_refs[(0, ) * self.ndim])
dtype = first_block.dtype
result = np.zeros(self.shape, dtype=dtype)
for index in np.ndindex(*self.num_blocks):
lower = DistArray.compute_block_lower(index, self.shape)
upper = DistArray.compute_block_upper(index, self.shape)
value = ray.get(self.object_refs[index])
result[tuple(slice(l, u) for (l, u) in zip(lower, upper))] = value
return result
def __getitem__(self, sliced):
# TODO(rkn): Fix this, this is just a placeholder that should work but
# is inefficient.
a = self.assemble()
return a[sliced]
@ray.remote
def assemble(a):
return a.assemble()
# TODO(rkn): What should we call this method?
@ray.remote
def numpy_to_dist(a):
result = DistArray(a.shape)
for index in np.ndindex(*result.num_blocks):
lower = DistArray.compute_block_lower(index, a.shape)
upper = DistArray.compute_block_upper(index, a.shape)
idx = tuple(slice(l, u) for (l, u) in zip(lower, upper))
result.object_refs[index] = ray.put(a[idx])
return result
@ray.remote
def zeros(shape, dtype_name="float"):
result = DistArray(shape)
for index in np.ndindex(*result.num_blocks):
result.object_refs[index] = ra.zeros.remote(
DistArray.compute_block_shape(index, shape), dtype_name=dtype_name)
return result
@ray.remote
def ones(shape, dtype_name="float"):
result = DistArray(shape)
for index in np.ndindex(*result.num_blocks):
result.object_refs[index] = ra.ones.remote(
DistArray.compute_block_shape(index, shape), dtype_name=dtype_name)
return result
@ray.remote
def copy(a):
result = DistArray(a.shape)
for index in np.ndindex(*result.num_blocks):
# We don't need to actually copy the objects because remote objects are
# immutable.
result.object_refs[index] = a.object_refs[index]
return result
@ray.remote
def eye(dim1, dim2=-1, dtype_name="float"):
dim2 = dim1 if dim2 == -1 else dim2
shape = [dim1, dim2]
result = DistArray(shape)
for (i, j) in np.ndindex(*result.num_blocks):
block_shape = DistArray.compute_block_shape([i, j], shape)
if i == j:
result.object_refs[i, j] = ra.eye.remote(
block_shape[0], block_shape[1], dtype_name=dtype_name)
else:
result.object_refs[i, j] = ra.zeros.remote(
block_shape, dtype_name=dtype_name)
return result
@ray.remote
def triu(a):
if a.ndim != 2:
raise Exception("Input must have 2 dimensions, but a.ndim is "
"{}.".format(a.ndim))
result = DistArray(a.shape)
for (i, j) in np.ndindex(*result.num_blocks):
if i < j:
result.object_refs[i, j] = ra.copy.remote(a.object_refs[i, j])
elif i == j:
result.object_refs[i, j] = ra.triu.remote(a.object_refs[i, j])
else:
result.object_refs[i, j] = ra.zeros_like.remote(
a.object_refs[i, j])
return result
@ray.remote
def tril(a):
if a.ndim != 2:
raise Exception("Input must have 2 dimensions, but a.ndim is "
"{}.".format(a.ndim))
result = DistArray(a.shape)
for (i, j) in np.ndindex(*result.num_blocks):
if i > j:
result.object_refs[i, j] = ra.copy.remote(a.object_refs[i, j])
elif i == j:
result.object_refs[i, j] = ra.tril.remote(a.object_refs[i, j])
else:
result.object_refs[i, j] = ra.zeros_like.remote(
a.object_refs[i, j])
return result
@ray.remote
def blockwise_dot(*matrices):
n = len(matrices)
if n % 2 != 0:
raise Exception("blockwise_dot expects an even number of arguments, "
"but len(matrices) is {}.".format(n))
shape = (matrices[0].shape[0], matrices[n // 2].shape[1])
result = np.zeros(shape)
for i in range(n // 2):
result += np.dot(matrices[i], matrices[n // 2 + i])
return result
@ray.remote
def dot(a, b):
if a.ndim != 2:
raise Exception("dot expects its arguments to be 2-dimensional, but "
"a.ndim = {}.".format(a.ndim))
if b.ndim != 2:
raise Exception("dot expects its arguments to be 2-dimensional, but "
"b.ndim = {}.".format(b.ndim))
if a.shape[1] != b.shape[0]:
raise Exception("dot expects a.shape[1] to equal b.shape[0], but "
"a.shape = {} and b.shape = {}.".format(
a.shape, b.shape))
shape = [a.shape[0], b.shape[1]]
result = DistArray(shape)
for (i, j) in np.ndindex(*result.num_blocks):
args = list(a.object_refs[i, :]) + list(b.object_refs[:, j])
result.object_refs[i, j] = blockwise_dot.remote(*args)
return result
@ray.remote
def subblocks(a, *ranges):
"""
    This function produces a distributed array from a subset of the blocks of
    `a`. The result and `a` will have the same number of dimensions. For
example,
subblocks(a, [0, 1], [2, 4])
will produce a DistArray whose object_refs are
[[a.object_refs[0, 2], a.object_refs[0, 4]],
[a.object_refs[1, 2], a.object_refs[1, 4]]]
We allow the user to pass in an empty list [] to indicate the full range.
"""
ranges = list(ranges)
if len(ranges) != a.ndim:
raise Exception("sub_blocks expects to receive a number of ranges "
"equal to a.ndim, but it received {} ranges and "
"a.ndim = {}.".format(len(ranges), a.ndim))
for i in range(len(ranges)):
# We allow the user to pass in an empty list to indicate the full
# range.
if ranges[i] == []:
ranges[i] = range(a.num_blocks[i])
if not np.alltrue(ranges[i] == np.sort(ranges[i])):
raise Exception("Ranges passed to sub_blocks must be sorted, but "
"the {}th range is {}.".format(i, ranges[i]))
if ranges[i][0] < 0:
raise Exception("Values in the ranges passed to sub_blocks must "
"be at least 0, but the {}th range is {}.".format(
i, ranges[i]))
if ranges[i][-1] >= a.num_blocks[i]:
raise Exception("Values in the ranges passed to sub_blocks must "
"be less than the relevant number of blocks, but "
"the {}th range is {}, and a.num_blocks = {}."
.format(i, ranges[i], a.num_blocks))
last_index = [r[-1] for r in ranges]
last_block_shape = DistArray.compute_block_shape(last_index, a.shape)
shape = [(len(ranges[i]) - 1) * BLOCK_SIZE + last_block_shape[i]
for i in range(a.ndim)]
result = DistArray(shape)
for index in np.ndindex(*result.num_blocks):
result.object_refs[index] = a.object_refs[tuple(
ranges[i][index[i]] for i in range(a.ndim))]
return result
@ray.remote
def transpose(a):
if a.ndim != 2:
raise Exception("transpose expects its argument to be 2-dimensional, "
"but a.ndim = {}, a.shape = {}.".format(
a.ndim, a.shape))
result = DistArray([a.shape[1], a.shape[0]])
for i in range(result.num_blocks[0]):
for j in range(result.num_blocks[1]):
result.object_refs[i, j] = ra.transpose.remote(a.object_refs[j, i])
return result
# TODO(rkn): support broadcasting?
@ray.remote
def add(x1, x2):
if x1.shape != x2.shape:
raise Exception("add expects arguments `x1` and `x2` to have the same "
"shape, but x1.shape = {}, and x2.shape = {}.".format(
x1.shape, x2.shape))
result = DistArray(x1.shape)
for index in np.ndindex(*result.num_blocks):
result.object_refs[index] = ra.add.remote(x1.object_refs[index],
x2.object_refs[index])
return result
# TODO(rkn): support broadcasting?
@ray.remote
def subtract(x1, x2):
if x1.shape != x2.shape:
raise Exception("subtract expects arguments `x1` and `x2` to have the "
"same shape, but x1.shape = {}, and x2.shape = {}."
.format(x1.shape, x2.shape))
result = DistArray(x1.shape)
for index in np.ndindex(*result.num_blocks):
result.object_refs[index] = ra.subtract.remote(x1.object_refs[index],
x2.object_refs[index])
return result
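# Usage sketch (hypothetical; assumes ray.init() has been called):
#   a = ray.get(numpy_to_dist.remote(np.random.rand(25, 25)))
#   b = ray.get(numpy_to_dist.remote(np.random.rand(25, 25)))
#   c = ray.get(dot.remote(a, b))  # DistArray of block object refs
#   np.allclose(ray.get(assemble.remote(c)), np.dot(a.assemble(), b.assemble()))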
| {
"content_hash": "5bd695384ac1c043187880ab0f8c8cdb",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 79,
"avg_line_length": 37.97894736842105,
"alnum_prop": 0.5619918699186992,
"repo_name": "robertnishihara/ray",
"id": "e2cd11a15d7f5693598b1cafe69ebbc12bce0edc",
"size": "10824",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/ray/experimental/array/distributed/core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "82909"
},
{
"name": "C++",
"bytes": "3971373"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Cython",
"bytes": "179979"
},
{
"name": "Dockerfile",
"bytes": "6468"
},
{
"name": "Go",
"bytes": "23139"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1248954"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "2205"
},
{
"name": "Python",
"bytes": "6567694"
},
{
"name": "Shell",
"bytes": "102477"
},
{
"name": "Starlark",
"bytes": "231513"
},
{
"name": "TypeScript",
"bytes": "147793"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from kitsune.products import api
# API urls
urlpatterns = [
url(r'^$', api.ProductList.as_view(), name='product-list'),
url(r'^(?P<product>[^/]+)/topic/?$', api.TopicList.as_view(), name='topic-list'),
url(r'^(?P<product>[^/]+)/topic/(?P<topic>[^/]+)/?$',
api.TopicDetail.as_view(),
name='topic-detail'),
]
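# Usage sketch (hypothetical values; paths are relative to wherever this
# urlconf is included):
#   reverse('topic-detail', kwargs={'product': 'firefox', 'topic': 'settings'})
#   -> .../firefox/topic/settings/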
| {
"content_hash": "17ec226a449e8b19f5d177f880c328cb",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 28.53846153846154,
"alnum_prop": 0.5876010781671159,
"repo_name": "anushbmx/kitsune",
"id": "9f82c160df567c19dc388308bd4186743de187b5",
"size": "371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kitsune/products/urls_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "335184"
},
{
"name": "Dockerfile",
"bytes": "3547"
},
{
"name": "Groovy",
"bytes": "4221"
},
{
"name": "HTML",
"bytes": "628447"
},
{
"name": "JavaScript",
"bytes": "802494"
},
{
"name": "Makefile",
"bytes": "3600"
},
{
"name": "Python",
"bytes": "2994910"
},
{
"name": "Shell",
"bytes": "19325"
},
{
"name": "TSQL",
"bytes": "1035"
}
],
"symlink_target": ""
} |
from fjord.base.templatetags.jinja_helpers import locale_name, urlparams
from fjord.base.tests import TestCase
class TestLocaleName(TestCase):
def test_english(self):
data = [
(u'en-US', u'English (US)'),
(u'es', u'Spanish'),
(u'gd', u'Gaelic (Scotland)'),
(u'xx', u'Unknown')
]
for name, expected in data:
assert unicode(locale_name(name)) == expected
def test_native(self):
data = [
(u'en-US', u'English (US)'),
(u'es', u'Espa\u00f1ol'),
(u'gd', u'G\u00e0idhlig'),
(u'xx', u'Unknown')
]
for name, expected in data:
assert unicode(locale_name(name, native=True)) == expected
class TestUrlParams(TestCase):
def test_utf8_urlencoded_query(self):
"""urlparams should not modify urlencoded unicode."""
# %C3%B1 is urlencoded unicode.
url = u'http://example.com/foo.html?bar=%C3%B1'
assert urlparams(url, hash='biff') == url + u'#biff'
| {
"content_hash": "7117c54029d0f6688a034738737ccb71",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 72,
"avg_line_length": 32.78125,
"alnum_prop": 0.5567206863679695,
"repo_name": "mozilla/fjord",
"id": "cf9d6cdc701ec4c41e142982a6b18b111b92ec4c",
"size": "1049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fjord/base/tests/test_helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "161912"
},
{
"name": "HTML",
"bytes": "142781"
},
{
"name": "JavaScript",
"bytes": "305185"
},
{
"name": "Python",
"bytes": "903100"
},
{
"name": "Shell",
"bytes": "11313"
},
{
"name": "Smarty",
"bytes": "691"
}
],
"symlink_target": ""
} |
from autobahn.twisted.wamp import ApplicationSession, ApplicationSessionFactory, ApplicationRunner
from twisted.internet import reactor
import multiprocessing
from f1.s.core.multiprocess import Multiprocess
class WampServerRunner:
def __init__(self):
self.url = u'ws://127.0.0.1:8080/ws'
self.realm = u'fmg'
self.runner = ApplicationRunner(url=self.url, realm=self.realm,
# debug=True, debug_wamp=True, debug_app=True
)
    def start(self, component):
        try:
            self.runner.run(component, start_reactor=False)
        except Exception as exc:
            print(exc)
            return exc
#
# class RPCWampServerMP:
# def __init__(self, component):
# server = RPCWampServerRunner(component)
# server.start()
# mp = multiprocessing.Process(target=reactor.run, args=())
# mp.start()
| {
"content_hash": "907ab9f3cd931291022f635316a91982",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 98,
"avg_line_length": 33.535714285714285,
"alnum_prop": 0.6059637912673056,
"repo_name": "filemakergarage/zeroclient",
"id": "d232c4b78e7c0195cbfca94112b06bf6cbb8f5a0",
"size": "966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "f1/s/core/wampserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "163404"
},
{
"name": "Shell",
"bytes": "5072"
}
],
"symlink_target": ""
} |
from django.db import models
from datetime import date
class Position(models.Model):
title = models.CharField(max_length=100)
    # pass the callable, not date.today(), so the default is evaluated on each save rather than once at import time
    date = models.DateField(default=date.today)
content = models.TextField()
def __unicode__(self):
return self.title
| {
"content_hash": "e83dfdf2d91db6a026eea1a0920e9001",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 49,
"avg_line_length": 24.454545454545453,
"alnum_prop": 0.6988847583643123,
"repo_name": "Hackfmi/Diaphanum",
"id": "a27af75af3f4825251df37a005c456209f9404e8",
"size": "293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "positions/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4870"
},
{
"name": "JavaScript",
"bytes": "72598"
},
{
"name": "Python",
"bytes": "272926"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import pytest
from poetry.core.constraints.generic import AnyConstraint
from poetry.core.constraints.generic import Constraint
from poetry.core.constraints.generic import MultiConstraint
from poetry.core.constraints.generic import UnionConstraint
from poetry.core.constraints.generic import parse_constraint
@pytest.mark.parametrize(
"input,constraint",
[
("*", AnyConstraint()),
("win32", Constraint("win32", "=")),
("=win32", Constraint("win32", "=")),
("==win32", Constraint("win32", "=")),
("!=win32", Constraint("win32", "!=")),
("!= win32", Constraint("win32", "!=")),
],
)
def test_parse_constraint(input: str, constraint: AnyConstraint | Constraint) -> None:
assert parse_constraint(input) == constraint
@pytest.mark.parametrize(
"input,constraint",
[
(
"!=win32,!=linux",
MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
),
(
"!=win32,!=linux,!=linux2",
MultiConstraint(
Constraint("win32", "!="),
Constraint("linux", "!="),
Constraint("linux2", "!="),
),
),
],
)
def test_parse_constraint_multi(input: str, constraint: MultiConstraint) -> None:
assert parse_constraint(input) == constraint
@pytest.mark.parametrize(
"input,constraint",
[
("win32 || linux", UnionConstraint(Constraint("win32"), Constraint("linux"))),
(
"win32 || !=linux2",
UnionConstraint(Constraint("win32"), Constraint("linux2", "!=")),
),
],
)
def test_parse_constraint_union(input: str, constraint: UnionConstraint) -> None:
assert parse_constraint(input) == constraint
| {
"content_hash": "65421ee227e51d153a57cc34c5e5af87",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 86,
"avg_line_length": 30.508474576271187,
"alnum_prop": 0.5916666666666667,
"repo_name": "python-poetry/poetry-core",
"id": "2707fd92f0abd83ecaf9707a54fb5463bac398da",
"size": "1800",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/constraints/generic/test_main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2664"
},
{
"name": "Makefile",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "2084191"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
} |
from twisted.conch import recvline
from fake_switches.terminal import TerminalController
class SwitchSSHShell(recvline.HistoricRecvLine):
def __init__(self, user, switch_core):
self.user = user
self.switch_core = switch_core
self.session = None
self.awaiting_keystroke = None
# Hack to get rid of magical characters that reset the screen / clear / goto position 0, 0
def initializeScreen(self):
self.mode = 'insert'
def connectionMade(self):
recvline.HistoricRecvLine.connectionMade(self)
self.session = self.switch_core.launch("ssh", SshTerminalController(
shell=self
))
def lineReceived(self, line):
line = line.decode()
still_listening = self.session.receive(line)
if not still_listening:
self.terminal.loseConnection()
def keystrokeReceived(self, keyID, modifier):
if keyID in self._printableChars:
if self.awaiting_keystroke is not None:
args = self.awaiting_keystroke[1] + [keyID.decode()]
cmd = self.awaiting_keystroke[0]
cmd(*args)
return
super(SwitchSSHShell, self).keystrokeReceived(keyID, modifier)
# replacing behavior of twisted/conch/recvline.py:205
def characterReceived(self, ch, moreCharactersComing):
command_processor = self.get_actual_processor()
if command_processor.replace_input is False:
self.terminal.write(ch)
else:
self.terminal.write((len(ch) * command_processor.replace_input).encode())
if self.mode == 'insert':
self.lineBuffer.insert(self.lineBufferIndex, ch)
else:
self.lineBuffer[self.lineBufferIndex:self.lineBufferIndex+1] = [ch]
self.lineBufferIndex += 1
def get_actual_processor(self):
proc = self.session.command_processor
while proc.sub_processor is not None:
proc = proc.sub_processor
return proc
class SshTerminalController(TerminalController):
def __init__(self, shell):
self.shell = shell
def write(self, text):
self.shell.terminal.write(text.encode())
def add_any_key_handler(self, callback, *params):
self.shell.awaiting_keystroke = (callback, list(params))
def remove_any_key_handler(self):
self.shell.awaiting_keystroke = None
| {
"content_hash": "0033d258301cfc0fb1c8f6d2270b81c9",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 94,
"avg_line_length": 33.486111111111114,
"alnum_prop": 0.6433015346329324,
"repo_name": "stephanerobert/fake-switches",
"id": "71c75351354e79a93298725a4d83280191dbfd39",
"size": "2985",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "fake_switches/terminal/ssh.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "878"
},
{
"name": "Python",
"bytes": "888666"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import os
try:
from setuptest import test
except ImportError:
from setuptools.command.test import test
version = __import__('filer').__version__
def read(fname):
# read the contents of a text file
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "django-filer",
version = version,
url = 'http://github.com/stefanfoulis/django-filer',
license = 'BSD',
platforms=['OS Independent'],
description = "A file management application for django that makes handling of files and images a breeze.",
long_description = read('README.rst'),
author = 'Stefan Foulis',
author_email = '[email protected]',
packages=find_packages(),
install_requires = (
'Django>=1.3,<1.5',
'easy-thumbnails>=1.0',
'django-mptt>=0.5.1,<0.6',
'django_polymorphic>=0.2',
),
include_package_data=True,
zip_safe=False,
classifiers = [
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
],
cmdclass={'test': test},
test_suite='setuptest.setuptest.SetupTestSuite',
tests_require=(
'django-setuptest>=0.1.1',
'argparse', # apparently needed by django-setuptest on python 2.6
),
)
| {
"content_hash": "e60797a2f5bdc3fe13964020bf52203e",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 111,
"avg_line_length": 30.897959183673468,
"alnum_prop": 0.6221928665785997,
"repo_name": "neoascetic/django-filer",
"id": "324851ba97b643c196f57d04074da51c9d5971ba",
"size": "1514",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "48306"
},
{
"name": "Python",
"bytes": "364063"
},
{
"name": "Shell",
"bytes": "1462"
}
],
"symlink_target": ""
} |
import re
import sys
from datetime import datetime
from subprocess import check_output
import httpx
import semantic_version
from packaging.version import parse
__all__ = [
"is_major_update",
"is_minor_update",
"is_patch_update",
"sorted_versions",
"get_categorized_package_data",
"get_parsed_environment_package_list",
"get_environment_requirements_list",
"parse_requirements_list",
"get_pypi_package_data",
"get_package_update_list",
]
def is_major_update(release, package):
"""
Checks if the release is a major update compared to the package
:param release: semantic_version.Version
:param package: semantic_version.Version
:return: bool
"""
return release in semantic_version.SimpleSpec(">=%s" % package.next_major())
def is_minor_update(release, package):
"""
Checks if the release is a minor update compared to the package
:param release: semantic_version.Version
:param package: semantic_version.Version
:return: bool
"""
return release in semantic_version.SimpleSpec(">=%s,<%s" % (package.next_minor(), package.next_major()))
def is_patch_update(release, package):
"""
Checks if the release is a patch update compared to the package
:param release: semantic_version.Version
:param package: semantic_version.Version
:return: bool
"""
return release in semantic_version.SimpleSpec(">=%s,<%s" % (package.next_patch(), package.next_minor()))
def sorted_versions(versions):
"""
Returns the list of Versions in ascending order
:param versions: semantic_version.Version[]
:return: semantic_version.Version[]
"""
return sorted(
versions,
key=lambda x: semantic_version.Version.coerce(x["version"]),
reverse=True,
)
def get_categorized_package_data(package_data, package_version):
"""
Returns all Versions grouped by type compared to the current package version
:param package_data: dict
:param package_version: semantic_version.Version
:return: {
major_updates: semantic_version.Version[]
minor_updates: semantic_version.Version[]
patch_updates: semantic_version.Version[]
pre_release_updates: semantic_version.Version[]
non_semantic_versions: semantic_version.Version[]
}
"""
major_updates = []
minor_updates = []
patch_updates = []
pre_release_updates = []
non_semantic_versions = []
for release, info in package_data["releases"].items():
parsed_release = parse(release)
upload_time = None
if info:
upload_time = datetime.strptime(info[0]["upload_time"], "%Y-%m-%dT%H:%M:%S")
try:
# Get semantic version of package
release_version = semantic_version.Version.coerce(release)
if not parsed_release.is_prerelease:
# Place package in the appropriate semantic visioning list
if is_major_update(release_version, package_version):
major_updates.append(
{
"version": release,
"upload_time": upload_time,
}
)
elif is_minor_update(release_version, package_version):
minor_updates.append(
{
"version": release,
"upload_time": upload_time,
}
)
elif is_patch_update(release_version, package_version):
patch_updates.append(
{
"version": release,
"upload_time": upload_time,
}
)
else:
pre_release_updates.append({"version": release, "upload_time": upload_time})
except ValueError:
# Keep track of versions that could not be recognized as semantic
non_semantic_versions.append({"version": release, "upload_time": upload_time})
return {
"major_updates": sorted_versions(major_updates),
"minor_updates": sorted_versions(minor_updates),
"patch_updates": sorted_versions(patch_updates),
"pre_release_updates": sorted_versions(pre_release_updates),
"non_semantic_versions": non_semantic_versions,
}
def get_parsed_environment_package_list():
"""
Get a parsed list of packages in the current environment
:return:
"""
return parse_requirements_list(get_environment_requirements_list())
def get_environment_requirements_list():
"""
Take the requirements list from the current running environment
:return: string
"""
requirement_list = []
requirements = check_output([sys.executable, "-m", "pip", "freeze"])
for requirement in requirements.split():
requirement_list.append(requirement.decode("utf-8"))
return requirement_list
def parse_requirements_list(requirements_list):
"""
Take a list and return a list of dicts with {package, versions) based on the requirements specs
:param requirements_list: string
:return: string
"""
req_list = []
for requirement in requirements_list:
requirement_no_comments = requirement.split("#")[0].strip()
# if matching requirement line (Thing==1.2.3), update dict, continue
req_match = re.match(
r"\s*(?P<package>[^\s\[\]]+)(?P<extras>\[\S+\])?==(?P<version>\S+)",
requirement_no_comments,
)
if req_match:
req_list.append(
{
"package": req_match.group("package"),
"version": req_match.group("version"),
}
)
return req_list
async def get_pypi_package_data(package_name, version=None):
"""
Get package data from pypi by the package name
https://wiki.python.org/moin/PyPIJSON
:param package_name: string
:param version: string
:return: dict
"""
pypi_url = "https://pypi.org/pypi"
if version:
package_url = "%s/%s/%s/json" % (
pypi_url,
package_name,
version,
)
else:
package_url = "%s/%s/json" % (
pypi_url,
package_name,
)
async with httpx.AsyncClient() as client:
try:
resp = await client.get(package_url, follow_redirects=True, timeout=None)
except httpx.ConnectError:
raise RuntimeError("Connection error!")
# Package not available on pypi
if resp.is_error:
return None
return resp.json()
async def get_package_update_list(package_name, version):
"""
Return update information of a package from a given version
:param package_name: string
:param version: string
:return: dict
"""
package_version = semantic_version.Version.coerce(version)
# Get package and version data from pypi
package_data = await get_pypi_package_data(package_name)
version_data = await get_pypi_package_data(package_name, version)
# Current release specific information
current_release = ""
current_release_license = ""
# Latest release specific information
latest_release = ""
latest_release_license = ""
# Information about packages
newer_releases = 0
pre_releases = 0
categorized_package_data = {
"major_updates": [],
"minor_updates": [],
"patch_updates": [],
"pre_release_updates": [],
"non_semantic_versions": [],
}
if package_data:
latest_release = package_data["info"]["version"]
latest_release_license = package_data["info"]["license"] if package_data["info"]["license"] else ""
categorized_package_data = get_categorized_package_data(package_data, package_version)
        # Get number of newer releases available for the given package, excluding pre-releases and non-semantic versions
newer_releases = len(
categorized_package_data["major_updates"]
+ categorized_package_data["minor_updates"]
+ categorized_package_data["patch_updates"]
)
pre_releases = len(categorized_package_data["pre_release_updates"])
if version_data:
current_release = version_data["info"]["version"]
current_release_license = version_data["info"]["license"] if version_data["info"]["license"] else ""
return {
"current_release": current_release,
"current_release_license": current_release_license,
"latest_release": latest_release,
"latest_release_license": latest_release_license,
"newer_releases": newer_releases,
"pre_releases": pre_releases,
**categorized_package_data,
}
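# Usage sketch (hypothetical package/version; Python 3.7+):
#   import asyncio
#   info = asyncio.run(get_package_update_list("requests", "2.20.0"))
#   print(info["latest_release"], info["newer_releases"])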
| {
"content_hash": "b8fca7860c21f2f56481a0fde4a2cb86",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 120,
"avg_line_length": 31.045296167247386,
"alnum_prop": 0.6016835016835017,
"repo_name": "nezhar/updatable",
"id": "2f91e02b5218135bf9fbdd31c79fa2f8d17ab239",
"size": "8910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "updatable/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56395"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
def set_original_fk(apps, schema_editor):
Document = apps.get_model("document", "Document")
Attachment = apps.get_model("foirequest", "FoiAttachment")
for att in Attachment.objects.filter(document__isnull=False):
Document.objects.filter(id=att.document_id).update(original_id=att.id)
class Migration(migrations.Migration):
dependencies = [
("foirequest", "0019_foiattachment_document"),
("document", "0009_document_original"),
]
operations = [
migrations.RunPython(set_original_fk),
]
| {
"content_hash": "8340a68f64dad745e41ba610fe0222bd",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 78,
"avg_line_length": 27.217391304347824,
"alnum_prop": 0.6853035143769968,
"repo_name": "fin/froide",
"id": "be18f17a3139c2a82b112cbec9a71a139208d7b8",
"size": "700",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "froide/document/migrations/0010_auto_20180807_1520.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "302838"
},
{
"name": "JavaScript",
"bytes": "47357"
},
{
"name": "Makefile",
"bytes": "535"
},
{
"name": "Python",
"bytes": "1706123"
},
{
"name": "SCSS",
"bytes": "39397"
},
{
"name": "TypeScript",
"bytes": "57910"
},
{
"name": "Vue",
"bytes": "218866"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(*, filter: Optional[str] = None, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.Authorization/accessReviewScheduleDefinitions")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
if filter is not None:
_params["$filter"] = _SERIALIZER.query("filter", filter, "str", skip_quote=True)
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class AccessReviewScheduleDefinitionsAssignedForMyApprovalOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.authorization.v2021_07_01_preview.AuthorizationManagementClient`'s
:attr:`access_review_schedule_definitions_assigned_for_my_approval` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, filter: Optional[str] = None, **kwargs: Any) -> Iterable["_models.AccessReviewScheduleDefinition"]:
"""Get access review instances assigned for my approval.
:param filter: The filter to apply on the operation. Other than standard filters, one custom
filter option is supported : 'assignedToMeToReview()'. When one specified
$filter=assignedToMeToReview(), only items that are assigned to the calling user to review are
returned. Default value is None.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AccessReviewScheduleDefinition or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.authorization.v2021_07_01_preview.models.AccessReviewScheduleDefinition]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.AccessReviewScheduleDefinitionListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
filter=filter,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("AccessReviewScheduleDefinitionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.Authorization/accessReviewScheduleDefinitions"} # type: ignore
| {
"content_hash": "077133973998d17977783d9968cc0f29",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 122,
"avg_line_length": 44.83892617449664,
"alnum_prop": 0.6591827570722946,
"repo_name": "Azure/azure-sdk-for-python",
"id": "1cd2a71810c11fd5b7fe0308ebd9c2c4605fc419",
"size": "7181",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2021_07_01_preview/operations/_access_review_schedule_definitions_assigned_for_my_approval_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Functional tests for aggregate operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.optimizer_v2 import adagrad
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
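# Reference (dense) Adagrad update these tests check against:
#   accum_t = accum + g_t ** 2
#   param_t = param - lr * g_t / (sqrt(accum_t) + epsilon)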
def adagrad_update_numpy(param, accum, g_t, lr=0.001, epsilon=1e-7):
accum_t = accum + g_t * g_t
param_t = param - lr * g_t / (np.sqrt(accum_t) + epsilon)
return param_t, accum_t
def sparse_adagrad_update_numpy(param,
accum,
gindexs,
gvalues,
lr=0.001,
epsilon=1e-7):
accum_t = copy.deepcopy(accum)
param_t = copy.deepcopy(param)
# first loop accumulates repeated indices if necessary.
for i in range(len(gindexs)):
gindex = gindexs[i]
gvalue = gvalues[i]
accum_t[gindex] = accum_t[gindex] + gvalue * gvalue
for i in range(len(gindexs)):
gindex = gindexs[i]
gvalue = gvalues[i]
param_t[gindex] = param_t[gindex] - lr * gvalue / (
np.sqrt(accum_t[gindex]) + epsilon)
return param_t, accum_t
class AdagradOptimizerTest(test.TestCase):
def doTestBasic(self, use_callable_params=False):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = lambda: 3.0
if not use_callable_params:
learning_rate = learning_rate()
ada_opt = adagrad.Adagrad(learning_rate)
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for _ in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np,
grads0_np, 3.0)
var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np,
grads1_np, 3.0)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testBasic(self):
self.doTestBasic()
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(use_callable_params=True)
def testBasicWithLearningRateDecay(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 3.0
decay = 0.5
ada_opt = adagrad.Adagrad(learning_rate, decay=decay)
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for t in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
lr_np = learning_rate / (1 + decay * t)
var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np,
grads0_np, lr_np)
var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np,
grads1_np, lr_np)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testBasicWithLearningRateInverseTimeDecay(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 3.0
decay = 0.5
lr_schedule = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps=1.0, decay_rate=decay)
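        # With decay_steps=1.0, InverseTimeDecay computes
        # lr(t) = learning_rate / (1 + decay_rate * t), matching lr_np below.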
ada_opt = adagrad.Adagrad(lr_schedule)
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for t in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
lr_np = learning_rate / (1 + decay * t)
var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np,
grads0_np, lr_np)
var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np,
grads1_np, lr_np)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable(
[[1.0, 2.0], [3.0, 4.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
def loss():
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop
return pred * pred
sgd_op = adagrad.Adagrad(1.0).minimize(loss, var_list=[var0])
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType(
[[1.0, 2.0], [3.0, 4.0]], var0.eval())
        # Run 1 step of adagrad
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[0, 1], [3, 4]], var0.eval(), atol=0.01)
@test_util.run_deprecated_v1
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = constant_op.constant(3.0)
ada_opt = adagrad.Adagrad(learning_rate)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np,
grads0_np, learning_rate)
var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np,
grads1_np, learning_rate)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testSparseBasic(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0_np_indices = np.array([0, 2], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np[grads0_np_indices]),
constant_op.constant(grads0_np_indices), constant_op.constant([3]))
grads1_np_indices = np.array([0, 2], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np[grads1_np_indices]),
constant_op.constant(grads1_np_indices), constant_op.constant([3]))
learning_rate = 3.0
ada_opt = adagrad.Adagrad(learning_rate)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 3.0, 4.0], var1.eval())
accum0_np = np.array([0.1, 0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1, 0.1], dtype=dtype.as_numpy_dtype)
        # Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
var0_np, accum0_np = sparse_adagrad_update_numpy(
var0_np, accum0_np, grads0_np_indices,
grads0_np[grads0_np_indices], learning_rate)
var1_np, accum1_np = sparse_adagrad_update_numpy(
var1_np, accum1_np, grads1_np_indices,
grads1_np[grads1_np_indices], learning_rate)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype)
repeated_index_update_var = resource_variable_ops.ResourceVariable(
var_np, dtype=dtype)
aggregated_update_var = resource_variable_ops.ResourceVariable(
var_np, dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adagrad.Adagrad(3.0).apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adagrad.Adagrad(3.0).apply_gradients(
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
@test_util.run_deprecated_v1
def testSparseRepeatedIndicesByEmbeddingLookUp(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var_repeated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_repeated = lambda: math_ops.reduce_sum( # pylint: disable=g-long-lambda
embedding_ops.embedding_lookup(var_repeated, [0, 0])) # pylint: disable=cell-var-from-loop
var_aggregated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_aggregated = lambda: 2 * math_ops.reduce_sum( # pylint: disable=g-long-lambda
embedding_ops.embedding_lookup(var_aggregated, [0])) # pylint: disable=cell-var-from-loop
update_op_repeated = adagrad.Adagrad(2.0).minimize(
loss_repeated, var_list=[var_repeated])
update_op_aggregated = adagrad.Adagrad(2.0).minimize(
loss_aggregated, var_list=[var_aggregated])
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType(
var_repeated.eval(), var_aggregated.eval())
for _ in range(3):
update_op_repeated.run()
update_op_aggregated.run()
self.assertAllCloseAccordingToType(
var_repeated.eval(), var_aggregated.eval())
@test_util.run_deprecated_v1
def testSparseStability(self):
for dtype in [dtypes.half]:
with self.cached_session():
shape = [1, 6]
var0_np = np.array([[
0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257, -0.0105945
]],
dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
grads0_np = np.array([[
-5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05, -8.4877e-05,
-9.48906e-05
]],
dtype=dtype.as_numpy_dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np), constant_op.constant([0]),
constant_op.constant(shape))
ada_opt = adagrad.Adagrad(1.0)
ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
slot0 = ada_opt.get_slot(var0, "accumulator")
init = variables.global_variables_initializer()
for _ in range(100):
init.run()
ada_update.run()
self.assertAllCloseAccordingToType(
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), slot0.eval())
self.assertAllCloseAccordingToType(
np.array([[
0.00891194, -0.10712013, 0.11047515, 0.22636929, -0.0144573,
-0.01029443
]]), var0.eval())
@test_util.run_deprecated_v1
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 3.0
ada_opt = adagrad.Adagrad(learning_rate)
# Apply the optimizer twice. Both applications will use
# the same accums.
ada_update1 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
ada_update2 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
slot0 = ada_opt.get_slot(var0, "accumulator")
self.assertEqual(slot0.shape, var0.shape)
slot1 = ada_opt.get_slot(var1, "accumulator")
self.assertEqual(slot1.shape, var1.shape)
variables.global_variables_initializer().run()
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Mix the first and the second adagrad for 3 steps.
ada_update1.run()
ada_update2.run()
ada_update1.run()
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
for _ in range(3):
var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np,
grads0_np, learning_rate)
var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np,
grads1_np, learning_rate)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testConstructAdagradWithLR(self):
opt = adagrad.Adagrad(lr=1.0)
opt_2 = adagrad.Adagrad(learning_rate=0.1, lr=1.0)
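    # The deprecated `lr` alias takes precedence over `learning_rate` when
    # both are given, which the assertions below verify.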
opt_3 = adagrad.Adagrad(learning_rate=0.1)
self.assertIsInstance(opt.lr, variables.Variable)
self.assertIsInstance(opt_2.lr, variables.Variable)
self.assertIsInstance(opt_3.lr, variables.Variable)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
if __name__ == "__main__":
test.main()
| {
"content_hash": "d6295beaba4ab7f136e6ce3e5ce6b2c3",
"timestamp": "",
"source": "github",
"line_count": 449,
"max_line_length": 118,
"avg_line_length": 44.69487750556793,
"alnum_prop": 0.6072353996412199,
"repo_name": "alsrgv/tensorflow",
"id": "3ddf9852ba8945f2fc299c034d56d409573735bf",
"size": "20757",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/optimizer_v2/adagrad_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "755360"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "68001148"
},
{
"name": "CMake",
"bytes": "204596"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1627121"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "842866"
},
{
"name": "Jupyter Notebook",
"bytes": "1665584"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "101157"
},
{
"name": "Objective-C",
"bytes": "104061"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17570"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48843099"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "488241"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
},
{
"name": "TSQL",
"bytes": "921"
}
],
"symlink_target": ""
} |
import numpy as np
from cs231n.layers import *
from cs231n.rnn_layers import *
class CaptioningRNN(object):
"""
A CaptioningRNN produces captions from image features using a recurrent
neural network.
The RNN receives input vectors of size D, has a vocab size of V, works on
sequences of length T, has an RNN hidden dimension of H, uses word vectors
of dimension W, and operates on minibatches of size N.
Note that we don't use any regularization for the CaptioningRNN.
"""
def __init__(self, word_to_idx, input_dim=512, wordvec_dim=128,
hidden_dim=128, cell_type='rnn', dtype=np.float32):
"""
Construct a new CaptioningRNN instance.
Inputs:
- word_to_idx: A dictionary giving the vocabulary. It contains V entries,
and maps each string to a unique integer in the range [0, V).
- input_dim: Dimension D of input image feature vectors.
- wordvec_dim: Dimension W of word vectors.
- hidden_dim: Dimension H for the hidden state of the RNN.
- cell_type: What type of RNN to use; either 'rnn' or 'lstm'.
- dtype: numpy datatype to use; use float32 for training and float64 for
numeric gradient checking.
"""
if cell_type not in {'rnn', 'lstm'}:
raise ValueError('Invalid cell_type "%s"' % cell_type)
self.cell_type = cell_type
self.dtype = dtype
self.word_to_idx = word_to_idx
self.idx_to_word = {i: w for w, i in word_to_idx.iteritems()}
self.params = {}
vocab_size = len(word_to_idx)
self._null = word_to_idx['<NULL>']
self._start = word_to_idx.get('<START>', None)
self._end = word_to_idx.get('<END>', None)
# Initialize word vectors
self.params['W_embed'] = np.random.randn(vocab_size, wordvec_dim)
self.params['W_embed'] /= 100
# Initialize CNN -> hidden state projection parameters
self.params['W_proj'] = np.random.randn(input_dim, hidden_dim)
self.params['W_proj'] /= np.sqrt(input_dim)
self.params['b_proj'] = np.zeros(hidden_dim)
# Initialize parameters for the RNN
dim_mul = {'lstm': 4, 'rnn': 1}[cell_type]
self.params['Wx'] = np.random.randn(wordvec_dim, dim_mul * hidden_dim)
self.params['Wx'] /= np.sqrt(wordvec_dim)
self.params['Wh'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)
self.params['Wh'] /= np.sqrt(hidden_dim)
self.params['b'] = np.zeros(dim_mul * hidden_dim)
# Initialize output to vocab weights
self.params['W_vocab'] = np.random.randn(hidden_dim, vocab_size)
self.params['W_vocab'] /= np.sqrt(hidden_dim)
self.params['b_vocab'] = np.zeros(vocab_size)
# Cast parameters to correct dtype
for k, v in self.params.iteritems():
self.params[k] = v.astype(self.dtype)
def loss(self, features, captions):
"""
Compute training-time loss for the RNN. We input image features and
ground-truth captions for those images, and use an RNN (or LSTM) to compute
loss and gradients on all parameters.
Inputs:
- features: Input image features, of shape (N, D)
- captions: Ground-truth captions; an integer array of shape (N, T) where
each element is in the range 0 <= y[i, t] < V
Returns a tuple of:
- loss: Scalar loss
- grads: Dictionary of gradients parallel to self.params
"""
# Cut captions into two pieces: captions_in has everything but the last word
# and will be input to the RNN; captions_out has everything but the first
# word and this is what we will expect the RNN to generate. These are offset
# by one relative to each other because the RNN should produce word (t+1)
# after receiving word t. The first element of captions_in will be the START
# token, and the first element of captions_out will be the first word.
captions_in = captions[:, :-1]
captions_out = captions[:, 1:]
    # You'll need this: positions where the target word is <NULL> are padding
    # and should be ignored by the (temporal softmax) loss.
mask = (captions_out != self._null)
# Weight and bias for the affine transform from image features to initial
# hidden state
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
# Word embedding matrix
W_embed = self.params['W_embed']
# Input-to-hidden, hidden-to-hidden, and biases for the RNN
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
# Weight and bias for the hidden-to-vocab transformation.
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
loss, grads = 0.0, {}
############################################################################
# TODO: Implement the forward and backward passes for the CaptioningRNN. #
# In the forward pass you will need to do the following: #
# (1) Use an affine transformation to compute the initial hidden state #
# from the image features. This should produce an array of shape (N, H)#
# (2) Use a word embedding layer to transform the words in captions_in #
# from indices to vectors, giving an array of shape (N, T, W). #
# (3) Use either a vanilla RNN or LSTM (depending on self.cell_type) to #
# process the sequence of input word vectors and produce hidden state #
# vectors for all timesteps, producing an array of shape (N, T, H). #
# (4) Use a (temporal) affine transformation to compute scores over the #
# vocabulary at every timestep using the hidden states, giving an #
# array of shape (N, T, V). #
# (5) Use (temporal) softmax to compute loss using captions_out, ignoring #
# the points where the output word is <NULL> using the mask above. #
# #
# In the backward pass you will need to compute the gradient of the loss #
# with respect to all model parameters. Use the loss and grads variables #
# defined above to store loss and gradients; grads[k] should give the #
# gradients for self.params[k]. #
############################################################################
hidden0 = features.dot(W_proj) + b_proj # 1
wordvec, wordvec_cache = word_embedding_forward(captions_in, W_embed) # 2
if self.cell_type == 'rnn':
hidden, hidden_cache = rnn_forward(wordvec, hidden0, Wx, Wh, b) # 3
else:
hidden, hidden_cache = lstm_forward(wordvec, hidden0, Wx, Wh, b)
scores, scores_cache = temporal_affine_forward(hidden, W_vocab, b_vocab) # 4
loss, dscores = temporal_softmax_loss(scores, captions_out, mask) # 5
dhidden, dW_vocab, db_vocab = temporal_affine_backward(dscores,
scores_cache)
if self.cell_type == 'rnn':
dwordvec, dhidden0, dWx, dWh, db = rnn_backward(dhidden, hidden_cache)
else:
dwordvec, dhidden0, dWx, dWh, db = lstm_backward(dhidden, hidden_cache)
dW_embed = word_embedding_backward(dwordvec, wordvec_cache)
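    # Backprop through the initial affine transform h0 = features.dot(W_proj)
    # + b_proj by hand, since no cache was kept for step (1):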
db_proj = np.sum(dhidden0, axis=0)
dW_proj = features.T.dot(dhidden0)
grads = {
'W_embed': dW_embed,
'W_proj': dW_proj,
'b_proj': db_proj,
'Wx': dWx,
'Wh': dWh,
'b': db,
'W_vocab': dW_vocab,
'b_vocab': db_vocab
}
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
def sample(self, features, max_length=30):
"""
Run a test-time forward pass for the model, sampling captions for input
feature vectors.
At each timestep, we embed the current word, pass it and the previous hidden
state to the RNN to get the next hidden state, use the hidden state to get
scores for all vocab words, and choose the word with the highest score as
the next word. The initial hidden state is computed by applying an affine
transform to the input image features, and the initial word is the <START>
token.
For LSTMs you will also have to keep track of the cell state; in that case
the initial cell state should be zero.
Inputs:
- features: Array of input image features of shape (N, D).
- max_length: Maximum length T of generated captions.
Returns:
- captions: Array of shape (N, max_length) giving sampled captions,
where each element is an integer in the range [0, V). The first element
of captions should be the first sampled word, not the <START> token.
"""
N = features.shape[0]
captions = self._null * np.ones((N, max_length), dtype=np.int32)
# Unpack parameters
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
W_embed = self.params['W_embed']
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
###########################################################################
# TODO: Implement test-time sampling for the model. You will need to #
# initialize the hidden state of the RNN by applying the learned affine #
# transform to the input image features. The first word that you feed to #
# the RNN should be the <START> token; its value is stored in the #
    # variable self._start. At each timestep you will need to:               #
# (1) Embed the previous word using the learned word embeddings #
# (2) Make an RNN step using the previous hidden state and the embedded #
# current word to get the next hidden state. #
# (3) Apply the learned affine transformation to the next hidden state to #
# get scores for all words in the vocabulary #
# (4) Select the word with the highest score as the next word, writing it #
# to the appropriate slot in the captions variable #
# #
# For simplicity, you do not need to stop generating after an <END> token #
# is sampled, but you can if you want to. #
# #
# HINT: You will not be able to use the rnn_forward or lstm_forward #
# functions; you'll need to call rnn_step_forward or lstm_step_forward in #
# a loop. #
###########################################################################
hidden = features.dot(W_proj) + b_proj
wordvec = W_embed[np.ones(N, dtype=np.int32) * self._start]
cell = np.zeros_like(hidden)
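    # The LSTM cell state starts at zero (per the docstring); it is simply
    # unused on the vanilla-RNN path.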
for t in range(max_length):
if self.cell_type == 'rnn':
hidden, _ = rnn_step_forward(wordvec, hidden, Wx, Wh, b)
else:
hidden, cell, _ = lstm_step_forward(wordvec, hidden, cell, Wx, Wh, b)
scores = hidden.dot(W_vocab) + b_vocab
wordidx = np.argmax(scores, axis=1)
captions[:, t] = wordidx
wordvec = W_embed[np.ones(N, dtype=np.int32) * wordidx]
############################################################################
# END OF YOUR CODE #
############################################################################
return captions
| {
"content_hash": "63c6e83160b0f018d2992140611ebc33",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 80,
"avg_line_length": 47.35245901639344,
"alnum_prop": 0.5738272459754198,
"repo_name": "zauonlok/cs231n",
"id": "703ba507702e3a770330efb3579fda14644cd185",
"size": "11554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assignment3/cs231n/classifiers/rnn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "13616923"
},
{
"name": "Python",
"bytes": "249125"
},
{
"name": "Shell",
"bytes": "2658"
}
],
"symlink_target": ""
} |
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cudnn.cudnn
_mode = libcudnn.CUDNN_ACTIVATION_SIGMOID
class Sigmoid(function.Function):
"""Logistic sigmoid function."""
def __init__(self, use_cudnn=True):
self.use_cudnn = use_cudnn
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].dtype == numpy.float32)
def forward_cpu(self, x):
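        # Direct formula; for very negative inputs numpy.exp(-x) can overflow
        # to inf (with a RuntimeWarning), but the division still yields 0.0.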
self.y = 1 / (1 + numpy.exp(-x[0]))
return self.y,
def forward_gpu(self, x):
if cuda.cudnn_enabled and self.use_cudnn:
self.y = cuda.empty_like(x[0])
dtype = x[0].dtype
alpha = numpy.array(1, dtype=dtype).ctypes
beta = numpy.array(0, dtype=dtype).ctypes
handle = cudnn.get_handle()
x_mat = x[0].reshape(x[0].shape[0], -1, 1, 1)
desc = cudnn.create_tensor_descriptor(x_mat)
libcudnn.activationForward(
handle, _mode, alpha.data, desc.value, x_mat.data.ptr,
beta.data, desc.value, self.y.data.ptr)
else:
self.y = cuda.elementwise(
'T x', 'T y', 'y = 1 / (1 + exp(-x))',
'sigmoid_fwd')(x[0])
return self.y,
def backward_cpu(self, x, gy):
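        # Since y = sigmoid(x), dy/dx = y * (1 - y); the chain rule gives
        # gx = gy * y * (1 - y).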
return gy[0] * self.y * (1 - self.y),
def backward_gpu(self, x, gy):
if cuda.cudnn_enabled and self.use_cudnn:
gx = cuda.empty_like(x[0])
dtype = x[0].dtype
one = numpy.array(1, dtype=dtype).ctypes
zero = numpy.array(0, dtype=dtype).ctypes
handle = cudnn.get_handle()
y_mat = self.y.reshape(self.y.shape[0], -1, 1, 1)
desc = cudnn.create_tensor_descriptor(y_mat)
libcudnn.activationBackward(
handle, _mode, one.data, desc.value, y_mat.data.ptr,
desc.value, gy[0].data.ptr, desc.value, x[0].data.ptr,
zero.data, desc.value, gx.data.ptr)
else:
gx = cuda.elementwise(
'T y, T gy', 'T gx',
'gx = gy * y * (1 - y)',
'sigmoid_bwd')(self.y, gy[0])
return gx,
def sigmoid(x, use_cudnn=True):
"""Elementwise sigmoid logistic function :math:`f(x)=(1 + \\exp(-x))^{-1}`.
Args:
x (~chainer.Variable): Input variable.
use_cudnn (bool): If True and CuDNN is enabled, then this function uses
CuDNN as the core implementation.
Returns:
~chainer.Variable: Output variable.
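    Example (a minimal sketch; values are illustrative)::
        x = chainer.Variable(numpy.array([-1.0, 0.0, 1.0], numpy.float32))
        y = sigmoid(x)  # elementwise 1 / (1 + exp(-x))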
"""
return Sigmoid(use_cudnn)(x)
| {
"content_hash": "7f8a04c68aacd48ae4407b2da8028ce0",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 33.03658536585366,
"alnum_prop": 0.5485418973791066,
"repo_name": "muupan/chainer",
"id": "21824f3309e1110f27bbcbe05e9fc68647610110",
"size": "2709",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chainer/functions/activation/sigmoid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "18613"
},
{
"name": "Cuda",
"bytes": "6118"
},
{
"name": "Python",
"bytes": "1222891"
}
],
"symlink_target": ""
} |
from ._IntegerNative import IntegerNative
from Cryptodome.Util.number import long_to_bytes, bytes_to_long
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib,
create_string_buffer,
get_raw_buffer, backend,
c_size_t, c_ulonglong)
from Cryptodome.Random.random import getrandbits
c_defs = """
int monty_pow(const uint8_t *base,
const uint8_t *exp,
const uint8_t *modulus,
uint8_t *out,
size_t len,
uint64_t seed);
"""
_raw_montgomery = load_pycryptodome_raw_lib("Cryptodome.Math._modexp", c_defs)
implementation = {"library": "custom", "api": backend}
class IntegerCustom(IntegerNative):
@staticmethod
def from_bytes(byte_string):
return IntegerCustom(bytes_to_long(byte_string))
def inplace_pow(self, exponent, modulus=None):
exp_value = int(exponent)
if exp_value < 0:
raise ValueError("Exponent must not be negative")
# No modular reduction
if modulus is None:
self._value = pow(self._value, exp_value)
return self
# With modular reduction
mod_value = int(modulus)
if mod_value < 0:
raise ValueError("Modulus must be positive")
if mod_value == 0:
raise ZeroDivisionError("Modulus cannot be zero")
# C extension only works with odd moduli
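        # (Montgomery reduction requires gcd(R, m) == 1 with R a power of
        # two, hence the odd-modulus restriction.)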
if (mod_value & 1) == 0:
self._value = pow(self._value, exp_value, mod_value)
return self
# C extension only works with bases smaller than modulus
if self._value >= mod_value:
self._value %= mod_value
max_len = len(long_to_bytes(max(self._value, exp_value, mod_value)))
base_b = long_to_bytes(self._value, max_len)
exp_b = long_to_bytes(exp_value, max_len)
modulus_b = long_to_bytes(mod_value, max_len)
out = create_string_buffer(max_len)
error = _raw_montgomery.monty_pow(
out,
base_b,
exp_b,
modulus_b,
c_size_t(max_len),
c_ulonglong(getrandbits(64))
)
if error:
raise ValueError("monty_pow failed with error: %d" % error)
result = bytes_to_long(get_raw_buffer(out))
self._value = result
return self
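# A minimal usage sketch (hypothetical values; assumes the compiled
# Cryptodome.Math._modexp extension is available):
#   n = IntegerCustom(3)
#   n.inplace_pow(65537, 1000003)  # 3 ** 65537 mod 1000003 via monty_pow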
| {
"content_hash": "7f4c88bc8cf9bb4662987c3f1dc444b1",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 78,
"avg_line_length": 30.876543209876544,
"alnum_prop": 0.5493802479008396,
"repo_name": "kawamon/hue",
"id": "b626014dbe57d5846d868465eed582fb1d9ced6f",
"size": "4016",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/pycryptodomex-3.9.7/lib/Cryptodome/Math/_IntegerCustom.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.apps import apps
from django.core import checks
from django.core.checks.registry import registry
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = "Checks the entire Django project for potential problems."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='*')
parser.add_argument('--tag', '-t', action='append', dest='tags',
help='Run only checks labeled with given tag.')
parser.add_argument('--list-tags', action='store_true', dest='list_tags',
help='List available tags.')
def handle(self, *app_labels, **options):
if options.get('list_tags'):
self.stdout.write('\n'.join(sorted(registry.tags_available())))
return
if app_labels:
app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
else:
app_configs = None
tags = options.get('tags', None)
if tags and any(not checks.tag_exists(tag) for tag in tags):
invalid_tag = next(tag for tag in tags if not checks.tag_exists(tag))
raise CommandError('There is no system check with the "%s" tag.' % invalid_tag)
self.check(app_configs=app_configs, tags=tags, display_num_errors=True)
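# Typical invocations (illustrative):
#   python manage.py check
#   python manage.py check myapp --tag models
#   python manage.py check --list-tags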
| {
"content_hash": "1cd68fc03dd084e2aa50036f7498a739",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 91,
"avg_line_length": 39.30555555555556,
"alnum_prop": 0.6480565371024735,
"repo_name": "dhoffman34/django",
"id": "be9b49d54a83e4265656dfe98d7f045e6f3e7bd5",
"size": "1439",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "django/core/management/commands/check.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import time
import glob
import os
import platform
import webbrowser
import pygame
import re
import json
import subprocess
import urllib2
if platform.system() == "Windows":
import pygameWindowInfo
from pygame.locals import *
from pygame_helpers import *
from collections import defaultdict
import string
class ItemInfo:
def __init__(self, id, x, y, index, shown=True, floor=False):
self.id = id
self.x = x
self.y = y
self.shown = shown
self.index = index
self.floor = floor
class IsaacTracker:
def __init__(self, verbose=False, debug=False, read_delay=1):
# Class variables
self.verbose = verbose
self.debug = debug
self.text_height = 0
self.text_margin_size = None # will be changed in load_options
self.font = None # will be changed in load_options
self.seek = 0
self.framecount = 0
self.read_delay = read_delay
self.run_ended = True
self.log_not_found = False
self.content = "" #cached contents of log
self.splitfile = [] #log split into lines
# initialize isaac stuff
self.collected_items = [] #list of string item ids with no leading zeros. can also contain "f1" through "f12" for floor markers
self.collected_guppy_items = [] #list of guppy items collected, probably redundant, oh well
self.collected_blind_item_indices = [] #list of indexes into the collected_items array for items that were picked up blind
self.rolled_item_indices = [] #list of indexes into the collected_items array for items that were rerolled
self.collected_item_info = [] #list of "immutable" ItemInfo objects used for determining the layout to draw
self.num_displayed_items = 0
self.selected_item_idx = None
self.seed = ""
self.current_room = ""
self.blind_floor = False
self.getting_start_items = False
self.run_start_line = 0
self.run_start_frame = 0
self.bosses = []
self.last_run = {}
self._image_library = {}
self.filter_list = [] # list of string item ids with zeros stripped, they are items we don't want to see
self.guppy_list = []
self.space_list = []
self.healthonly_list = []
self.items_info = {}
self.item_message_start_time = 0
self.item_pickup_time = 0
self.item_position_index = []
self.current_floor = () # 2-tuple with first value being floor number, second value being alt stage value (0 or 1, r.n.)
self.spawned_coop_baby = 0 # last spawn of a co op baby
self.roll_icon = None
self.blind_icon = None
# Load all of the settings from the "options.json" file
self.load_options()
with open("items.txt", "r") as items_file:
self.items_info = json.load(items_file)
for itemid, item in self.items_info.iteritems():
if not item["shown"]:
self.filter_list.append(itemid.lstrip("0"))
if "guppy" in item and item["guppy"]:
self.guppy_list.append(itemid.lstrip("0"))
if "space" in item and item["space"]:
self.space_list.append(itemid.lstrip("0"))
if "healthonly" in item and item["healthonly"]:
self.healthonly_list.append(itemid.lstrip("0"))
self.floor_id_to_label = {
"f1": "B1",
"f2": "B2",
"f3": "C1",
"f4": "C2",
"f5": "D1",
"f6": "D2",
"f7": "W1",
"f8": "W2",
"f9": "SHEOL",
"f10": "CATH",
"f11": "DARK",
"f12": "CHEST",
"f1x": "BXL",
"f3x": "CXL",
"f5x": "DXL",
"f7x": "WXL",
}
def load_options(self):
with open("options.json", "r") as json_file:
self.options = json.load(json_file)
# anything that gets calculated and cached based on something in options now needs to be flushed
self.text_margin_size = int(8 * self.options["size_multiplier"])
# font can only be initialized after pygame is set up
if self.font:
self.font = pygame.font.SysFont(self.options['show_font'], int(8 * self.options["size_multiplier"]), bold=self.options["bold_font"])
self._image_library = {}
self.roll_icon = pygame.transform.scale(self.get_image(self.id_to_image("284")), (16 * self.options["size_multiplier"], 16 * self.options["size_multiplier"]))
self.blind_icon = pygame.transform.scale(self.get_image("collectibles/questionmark.png"), (16 * self.options["size_multiplier"], 16 * self.options["size_multiplier"]))
def save_options(self):
with open("options.json", "w") as json_file:
json.dump(self.options, json_file, indent=3, sort_keys=True)
# just for debugging
def log_msg(self, msg, level):
if level=="V" and self.verbose: print msg
if level=="D" and self.debug: print msg
# just for the suffix of boss kill number lol
def suffix(self, d):
return 'th' if 11<=d<=13 else {1:'st',2:'nd',3:'rd'}.get(d%10, 'th')
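  # e.g. suffix(1) -> 'st', suffix(2) -> 'nd', suffix(11) -> 'th', suffix(23) -> 'rd'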
def check_end_run(self,line,cur_line_num):
if not self.run_ended:
died_to = ""
end_type = ""
if self.bosses and self.bosses[-1][0] in ['???','The Lamb','Mega Satan']:
end_type = "Won"
elif (self.seed != '') and line.startswith('RNG Start Seed:'):
end_type = "Reset"
elif line.startswith('Game Over.'):
end_type = "Death"
died_to = re.search('(?i)Killed by \((.*)\) spawned',line).group(1)
if end_type:
self.last_run = {
"bosses":self.bosses
, "items":self.collected_items
, "seed":self.seed
, "died_to":died_to
, "end_type":end_type
}
self.run_ended = True
self.log_msg("End of Run! %s" % self.last_run,"D")
if end_type != "Reset":
self.save_file(self.run_start_line,cur_line_num, self.seed)
def save_file(self, start, end, seed):
self.mkdir("run_logs")
timestamp = int(time.time())
seed = seed.replace(" ","")
data = "\n".join(self.splitfile[start:end+1])
data = "%s\nRUN_OVER_LINE\n%s" % (data, self.last_run)
with open("run_logs/%s%s.log" % (seed,timestamp),'wb') as f:
f.write(data)
def mkdir(self, dn):
import os
if not os.path.isdir(dn):
os.mkdir(dn)
# image library stuff, from openbookproject.net
def get_image(self, path):
image = self._image_library.get(path)
if image is None:
canonicalized_path = path.replace('/', os.sep).replace('\\', os.sep)
image = pygame.image.load(canonicalized_path)
      scaled_image = pygame.transform.scale(image, (int(image.get_size()[0] * self.options["size_multiplier"]), int(image.get_size()[1] * self.options["size_multiplier"])))
      self._image_library[path] = scaled_image
      # return the scaled image on the first (cache-miss) call too, matching
      # what later cache hits return
      image = scaled_image
    return image
def build_position_index(self):
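    # Build a pixel -> item-index map of the window so that MOUSEMOTION
    # handling in run() can resolve hovers with a single array lookup.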
w = self.options["width"]
h = self.options["height"]
# 2d array of size h, w
self.item_position_index = [[None for x in xrange(w)] for y in xrange(h)]
self.num_displayed_items = 0
for item in self.collected_item_info:
if item.shown and not item.floor:
self.num_displayed_items += 1
for y in range(int(item.y), int(item.y + 32 * self.options["size_multiplier"])):
if y >= h:
continue
row = self.item_position_index[y]
for x in range(int(item.x), int(item.x + 32 * self.options["size_multiplier"])):
if x >= w:
continue
row[x] = item.index
def reflow(self):
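    # Shrink the per-item footprint from the configured spacing until the
    # layout fits; below min_spacing (or 4px), force a layout that is allowed
    # to overflow vertically.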
item_icon_size = int(self.options["default_spacing"] * self.options["size_multiplier"] * .5)
item_icon_footprint = item_icon_size
result = self.try_layout(item_icon_footprint, item_icon_size, False)
while result is None:
item_icon_footprint -= 1
if item_icon_footprint < self.options["min_spacing"] or item_icon_footprint < 4:
result = self.try_layout(item_icon_footprint, item_icon_size, True)
else:
result = self.try_layout(item_icon_footprint, item_icon_size, False)
self.collected_item_info = result
self.build_position_index()
def try_layout(self, icon_footprint, icon_size, force_layout):
new_item_info = []
cur_row = 0
cur_column = 0
index = 0
vert_padding = 0
if self.options['show_floors']:
vert_padding = self.text_margin_size
for item_id in self.collected_items:
if item_id not in self.filter_list \
and (not item_id in self.healthonly_list or self.options["show_health_ups"])\
and (not item_id in self.space_list or item_id in self.guppy_list or self.options["show_space_items"])\
and (not index in self.rolled_item_indices or self.options["show_rerolled_items"]):
#check to see if we are about to go off the right edge
if icon_footprint * (cur_column) + 32 * self.options["size_multiplier"] > self.options["width"]:
if (not force_layout) and self.text_height + (icon_footprint + vert_padding) * (cur_row + 1) + icon_size + vert_padding > self.options["height"]:
return None
cur_row += 1
cur_column = 0
if item_id.startswith('f'):
item_info = ItemInfo(id = item_id,
x = icon_footprint * cur_column,
y = self.text_height + (icon_footprint * cur_row) + (vert_padding * (cur_row + 1)),
shown = True,
index = index,
floor = True)
new_item_info.append(item_info)
else:
item_info = ItemInfo(id = item_id,
x = icon_footprint * cur_column,
y = self.text_height + (icon_footprint * cur_row) + (vert_padding * (cur_row + 1)),
shown = True,
index = index)
new_item_info.append(item_info)
cur_column += 1
else:
item_info = ItemInfo(id = item_id,
x = icon_footprint * cur_column,
y = self.text_height + (icon_footprint * cur_row) + (vert_padding * (cur_row + 1)),
shown = False,
index = index)
new_item_info.append(item_info)
index += 1
return new_item_info
def generateItemDescription(self, item_info):
desc = ""
text = item_info.get("text")
dmg = item_info.get("dmg")
dmgx = item_info.get("dmgx")
delay = item_info.get("delay")
delayx = item_info.get("delayx")
health = item_info.get("health")
speed = item_info.get("speed")
shotspeed = item_info.get("shotspeed")
tearrange = item_info.get("range")
height = item_info.get("height")
tears = item_info.get("tears")
soulhearts = item_info.get("soulhearts")
sinhearts = item_info.get("sinhearts")
if dmg:
desc += dmg + " dmg, "
if dmgx:
desc += "x" + dmgx + " dmg, "
if tears:
desc += tears + " tears, "
if delay:
desc += delay + " tear delay, "
if delayx:
desc += "x" + delayx + " tear delay, "
if shotspeed:
desc += shotspeed + " shotspeed, "
if tearrange:
desc += tearrange + " range, "
if height:
desc += height + " height, "
if speed:
desc += speed + " speed, "
if health:
desc += health + " health, "
if soulhearts:
desc += soulhearts + " soul hearts, "
if sinhearts:
desc += sinhearts + " sin hearts, "
if text:
desc += text
if desc.endswith(", "):
desc = desc[:-2]
if len(desc) > 0:
desc = ": " + desc
return desc
def color(self, string):
return pygame.color.Color(str(string))
def load_selected_detail_page(self):
    # Open the details page for the currently selected item, if any.
    if self.selected_item_idx is None:
return
url = self.options.get("item_details_link")
if not url:
return
item_id = self.collected_item_info[self.selected_item_idx].id
url = url.replace("$ID", item_id)
webbrowser.open(url, autoraise=True)
return
def adjust_selected_item(self, amount):
itemlength = len(self.collected_item_info)
if self.num_displayed_items < 1:
return
if self.selected_item_idx is None and amount > 0:
self.selected_item_idx = 0
elif self.selected_item_idx is None and amount < 0:
self.selected_item_idx = itemlength - 1
else:
done = False
while not done:
self.selected_item_idx += amount
# clamp it to the range (0, length)
self.selected_item_idx = (self.selected_item_idx + itemlength) % itemlength
selected_type = self.collected_item_info[self.selected_item_idx]
done = selected_type.shown and not selected_type.floor
self.item_message_start_time = self.framecount
def item_message_countdown_in_progress(self):
return self.item_message_start_time + (self.options["message_duration"] * self.options["framerate_limit"]) > self.framecount
def item_pickup_countdown_in_progress(self):
return self.item_pickup_time + (self.options["message_duration"] * self.options["framerate_limit"]) > self.framecount
def write_item_text(self, my_font, screen):
item_idx = self.selected_item_idx
if item_idx is None and self.item_pickup_countdown_in_progress():
item_idx = -1
    if item_idx is None or item_idx >= len(self.collected_items):
return False
item = self.collected_items[item_idx]
if item.startswith('f'):
return False
id_padded = item.zfill(3)
item_info = self.items_info[id_padded]
desc = self.generateItemDescription(item_info)
self.text_height = draw_text(
screen,
"%s%s" % (item_info["name"], desc),
self.color(self.options["text_color"]),
pygame.Rect(2, 2, self.options["width"] - 2, self.options["height"] - 2),
my_font,
aa=True,
wrap=self.options["word_wrap"]
)
return True
def load_log_file(self):
self.log_not_found = False
path = None
logfile_location = ""
if platform.system() == "Windows":
logfile_location = os.environ['USERPROFILE'] + '/Documents/My Games/Binding of Isaac Rebirth/'
elif platform.system() == "Linux":
logfile_location = os.path.expanduser('~') + '/.local/share/binding of isaac rebirth/'
elif platform.system() == "Darwin":
logfile_location = os.path.expanduser('~') + '/Library/Application Support/Binding of Isaac Rebirth/'
for check in ('../log.txt', logfile_location + 'log.txt'):
if os.path.isfile(check):
path = check
break
    if path is None:
self.log_not_found = True
return
cached_length = len(self.content)
file_size = os.path.getsize(path)
if cached_length > file_size or cached_length == 0: # New log file or first time loading the log
self.content = open(path, 'rb').read()
elif cached_length < file_size: # append existing content
f = open(path, 'rb')
f.seek(cached_length + 1)
self.content += f.read()
#returns text to put in the titlebar
def check_for_update(self):
try:
github_info_json = urllib2.urlopen("https://api.github.com/repos/Hyphen-ated/RebirthItemTracker/releases/latest").read()
info = json.loads(github_info_json)
latest_version = info["name"]
with open('version.txt', 'r') as f:
        if latest_version != f.read():
return " (new version available)"
except Exception as e:
self.log_msg("Failed to find update info: " + e.message, "D")
return ""
def id_to_image(self, id):
return 'collectibles/collectibles_%s.png' % id.zfill(3)
def draw_floor(self, f, screen, my_font):
pygame.draw.lines(
screen,
self.color(self.options["text_color"]),
False,
((f.x + 2, int(f.y + 24 * self.options["size_multiplier"])), (f.x + 2, f.y), (int(f.x + 16 * self.options["size_multiplier"]), f.y))
)
image = my_font.render(self.floor_id_to_label[f.id], True, self.color(self.options["text_color"]))
screen.blit(image, (f.x + 4, f.y - self.text_margin_size))
def draw_item(self, item, screen):
image = self.get_image(self.id_to_image(item.id))
screen.blit(image, (item.x, item.y))
if item.index in self.rolled_item_indices:
screen.blit(self.roll_icon, (item.x,item.y))
if self.options["show_blind_icon"] and item.index in self.collected_blind_item_indices:
screen.blit(self.blind_icon, (item.x,item.y + self.options["size_multiplier"] * 12))
def run(self):
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (self.options["xposition"],self.options["yposition"])
# initialize pygame system stuff
pygame.init()
update_notifier = self.check_for_update()
pygame.display.set_caption("Rebirth Item Tracker" + update_notifier)
screen = pygame.display.set_mode((self.options["width"], self.options["height"]), RESIZABLE)
self.font = pygame.font.SysFont(self.options["show_font"], int(8 * self.options["size_multiplier"]), bold=self.options["bold_font"])
pygame.display.set_icon(self.get_image("collectibles/collectibles_333.png"))
done = False
clock = pygame.time.Clock()
winInfo = None
if platform.system() == "Windows":
winInfo = pygameWindowInfo.PygameWindowInfo()
del os.environ['SDL_VIDEO_WINDOW_POS']
while not done:
# pygame logic
for event in pygame.event.get():
if event.type == pygame.QUIT:
if platform.system() == "Windows":
winPos = winInfo.getScreenPosition()
self.options["xposition"] = winPos["left"]
self.options["yposition"] = winPos["top"]
self.save_options()
done = True
elif event.type == VIDEORESIZE:
screen=pygame.display.set_mode(event.dict['size'], RESIZABLE)
self.options["width"] = event.dict["w"]
self.options["height"] = event.dict["h"]
self.save_options()
self.reflow()
pygame.display.flip()
elif event.type == MOUSEMOTION:
if pygame.mouse.get_focused():
x, y = pygame.mouse.get_pos()
if y < len(self.item_position_index):
selected_row = self.item_position_index[y]
if x < len(selected_row):
self.selected_item_idx = selected_row[x]
                if self.selected_item_idx is not None:
self.item_message_start_time = self.framecount
elif event.type == KEYDOWN:
if len(self.collected_items) > 0:
if event.key == pygame.K_RIGHT:
self.adjust_selected_item(1)
elif event.key == pygame.K_LEFT:
self.adjust_selected_item(-1)
elif event.key == pygame.K_RETURN:
self.load_selected_detail_page()
elif event.type == MOUSEBUTTONDOWN:
if event.button == 1:
self.load_selected_detail_page()
if event.button == 3:
if os.path.isfile("optionpicker/option_picker.exe"):
self.log_msg("Starting option picker from .exe","D")
subprocess.call(os.path.join('optionpicker',"option_picker.exe"),shell=True)
elif os.path.isfile("option_picker.py"):
self.log_msg("Starting option picker from .py","D")
subprocess.call("python option_picker.py",shell=True)
else:
self.log_msg("No option_picker found!","D")
self.load_options()
self.selected_item_idx = None # Clear this to avoid overlapping an item that may have been hidden
self.reflow()
screen.fill(self.color(self.options["background_color"]))
clock.tick(int(self.options["framerate_limit"]))
if self.log_not_found:
draw_text(
screen,
"log.txt not found. Put the RebirthItemTracker folder inside the isaac folder, next to log.txt",
self.color(self.options["text_color"]),
pygame.Rect(2, 2, self.options["width"] - 2, self.options["height"] - 2),
self.font,
aa=True,
wrap=True
)
# 19 pixels is the default line height, but we don't know what the line height is with respect to the user's particular size_multiplier.
# Thus, we can just draw a single space to ensure that the spacing is consistent whether text happens to be showing or not.
if self.options["show_description"] or self.options["show_custom_message"]:
self.text_height = draw_text(
screen,
" ",
self.color(self.options["text_color"]),
pygame.Rect(2, 2, self.options["width"] - 2, self.options["height"] - 2),
self.font,
aa=True,
wrap=self.options["word_wrap"]
)
else:
self.text_height = 0
text_written = False
# draw item pickup text, if applicable
if (len(self.collected_items) > 0
and self.options["show_description"]
and self.run_start_frame + 120 < self.framecount
and self.item_message_countdown_in_progress()):
text_written = self.write_item_text(self.font, screen)
if not text_written and self.options["show_custom_message"] and not self.log_not_found:
# draw seed/guppy text:
seed = self.seed
if len(self.collected_guppy_items) >= 3:
guppy = "yes"
else:
guppy = str(len(self.collected_guppy_items))
        # Use vformat to handle the case where the user adds an undefined
        # placeholder in custom_message
message = string.Formatter().vformat(self.options["custom_message"],
(), defaultdict(str, seed=seed,
guppy=guppy))
self.text_height = draw_text(screen,
message,
self.color(self.options["text_color"]),
pygame.Rect(2,2,self.options["width"]-2,self.options["height"]-2),
self.font,
aa=True, wrap=self.options["word_wrap"])
self.reflow()
if not self.item_message_countdown_in_progress():
self.selected_item_idx = None
floor_to_draw = None
# draw items on screen, excluding filtered items:
for item in self.collected_item_info:
if item.shown:
if item.floor:
floor_to_draw = item
else:
self.draw_item(item, screen)
            # don't draw a floor until we hit the next item (this way multiple floors in a row collapse)
            if floor_to_draw and self.options["show_floors"]:
              self.draw_floor(floor_to_draw, screen, self.font)
              floor_to_draw = None
      # also draw the floor if we hit the end, so the current floor is visible
      if floor_to_draw and self.options["show_floors"]:
        self.draw_floor(floor_to_draw, screen, self.font)
      if (self.selected_item_idx is not None
and self.selected_item_idx < len(self.collected_item_info)
and self.item_message_countdown_in_progress()):
item = self.collected_item_info[self.selected_item_idx]
if item.id not in self.floor_id_to_label:
screen.blit(self.get_image(self.id_to_image(item.id)), (item.x, item.y))
pygame.draw.rect(
screen,
self.color(self.options["text_color"]),
(item.x, item.y, int(32 * self.options["size_multiplier"]), int(32 * self.options["size_multiplier"])),
2
)
pygame.display.flip()
self.framecount += 1
# process log stuff every read_delay seconds. making sure to truncate to an integer or else it might never mod to 0
if self.framecount % int(self.options["framerate_limit"]*self.read_delay) == 0:
self.load_log_file()
self.splitfile = self.content.splitlines()
# return to start if seek passes the end of the file (usually b/c log file restarted)
if self.seek > len(self.splitfile):
self.log_msg("Current line number longer than lines in file, returning to start of file", "D")
self.seek = 0
should_reflow = False
# process log's new output
for current_line_number,line in enumerate(self.splitfile[self.seek:]):
self.log_msg(line,"V")
# end floor boss defeated, hopefully?
if line.startswith('Mom clear time:'):
kill_time = int(line.split(" ")[-1])
# if you re-enter a room you get a "mom clear time" again, check for that.
# can you fight the same boss twice?
if self.current_room not in [x[0] for x in self.bosses]:
self.bosses.append((self.current_room, kill_time))
self.log_msg("Defeated %s%s boss %s at time %s" % (len(self.bosses), self.suffix(len(self.bosses)), self.current_room, kill_time), "D")
# check + handle the end of the run (order important here!)
# we want it after boss kill (so we have that handled) but before RNG Start Seed (so we can handle that)
self.check_end_run(line, current_line_number + self.seek)
# start of a run
if line.startswith('RNG Start Seed:'):
# this assumes a fixed width, but from what i see it seems safe
self.seed = line[16:25]
self.log_msg("Starting new run, seed: %s" % self.seed, "D")
self.run_start_frame = self.framecount
self.rolled_item_indices = []
self.collected_items = []
self.collected_guppy_items = []
self.collected_blind_item_indices = []
self.log_msg("Emptied item array", "D")
self.bosses = []
self.log_msg("Emptied boss array", "D")
self.run_start_line = current_line_number + self.seek
self.run_ended = False
with open("seed.txt", "w") as f:
f.write(self.seed)
# entered a room, use to keep track of bosses
if line.startswith('Room'):
self.current_room = re.search('\((.*)\)', line).group(1)
if 'Start Room' not in line:
self.getting_start_items = False
self.log_msg("Entered room: %s" % self.current_room, "D")
if line.startswith('Level::Init'):
self.current_floor = tuple([re.search("Level::Init m_Stage (\d+), m_AltStage (\d+)", line).group(x) for x in [1,2]])
self.blind_floor = False # assume floors aren't blind until we see they are
self.getting_start_items = True
floor = int(self.current_floor[0])
alt = self.current_floor[1]
# special handling for cath and chest
if alt == '1' and (floor == 9 or floor == 11):
floor += 1
self.collected_items.append('f' + str(floor))
should_reflow = True
if line.startswith('Curse of the Labyrinth!'):
#it SHOULD always begin with f (that is, it's a floor) because this line only comes right after the floor line
if self.collected_items[-1].startswith('f'):
self.collected_items[-1] += 'x'
if line.startswith('Curse of Blind'):
self.blind_floor = True
if line.startswith('Spawn co-player!'):
self.spawned_coop_baby = current_line_number + self.seek
if re.search("Added \d+ Collectibles", line):
self.log_msg("Reroll detected!","D")
self.rolled_item_indices = [index for index,item in enumerate(self.collected_items) if item[0] != 'f']
if line.startswith('Adding collectible'):
if len(self.splitfile) > 1 and self.splitfile[current_line_number + self.seek - 1] == line:
self.log_msg("Skipped duplicate item line from baby presence","D")
continue
# hacky string manip, idgaf
space_split = line.split(" ")
# string has the form "Adding collectible 105 (The D6)"
item_id = space_split[2]
if ((current_line_number + self.seek) - self.spawned_coop_baby) < (len(self.collected_items) + 10) and item_id in self.collected_items:
self.log_msg("Skipped duplicate item line from baby entry","D")
continue
item_name = " ".join(space_split[3:])[1:-1]
self.log_msg("Picked up item. id: %s, name: %s" % (item_id, item_name),"D")
id_padded = item_id.zfill(3)
item_info = self.items_info[id_padded]
with open("itemInfo.txt", "w") as f:
desc = self.generateItemDescription(item_info)
f.write(item_info["name"] + ":" + desc)
# ignore repeated pickups of space bar items
if not (item_info.get("space") and item_id in self.collected_items):
self.collected_items.append(item_id)
self.item_message_start_time = self.framecount
self.item_pickup_time = self.framecount
else:
self.log_msg("Skipped adding item %s to avoid space-bar duplicate" % item_id,"D")
if "guppy" in item_info and item_info.get("guppy") and item_id not in self.collected_guppy_items:
self.collected_guppy_items.append(item_id)
if self.blind_floor and not self.getting_start_items:
# the item we just picked up was picked up blind, so add its index here to track that fact
self.collected_blind_item_indices.append(len(self.collected_items) - 1)
should_reflow = True
self.seek = len(self.splitfile)
if should_reflow:
self.reflow()
try:
rt = IsaacTracker(verbose=False, debug=False)
rt.run()
except Exception as e:
import traceback
traceback.print_exc()
| {
"content_hash": "5d6ce4da31b88b23971e7623542534ca",
"timestamp": "",
"source": "github",
"line_count": 708,
"max_line_length": 172,
"avg_line_length": 41.820621468926554,
"alnum_prop": 0.5950893309466716,
"repo_name": "Brett824/RebirthItemTracker",
"id": "917a57aa8fe3a591f58c57f34b37836d0ad06b56",
"size": "29609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "item_tracker.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "48269"
}
],
"symlink_target": ""
} |
import setuptools
setuptools.setup(
setup_requires=['pbr'],
pbr=True)
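# pbr reads the package metadata from setup.cfg rather than from setup()
# keywords. A minimal companion setup.cfg might look like this (the values
# below are illustrative, not taken from this repository):
#
#     [metadata]
#     name = snippets
#     summary = Example package built with pbr
#
#     [files]
#     packages = snippets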
| {
"content_hash": "f5a10bb57283eb248cbf83d66a27aa7d",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 27,
"avg_line_length": 20.5,
"alnum_prop": 0.6585365853658537,
"repo_name": "mr-uuid/snippets",
"id": "734058dc029609783fdcbfad25bce7f077b16382",
"size": "82",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/packages/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1274"
},
{
"name": "Python",
"bytes": "38641"
},
{
"name": "Shell",
"bytes": "1839"
}
],
"symlink_target": ""
} |
import os
import json
import time
import socket
from importlib import import_module
import jujuresources
from path import Path
from jujubigdata import utils
import requests
from urllib.parse import urljoin
from subprocess import call
from charmhelpers.core import unitdata, hookenv, host
from charmhelpers import fetch
from charms import layer
from charms.templating.jinja2 import render
class Zeppelin(object):
"""
This class manages the Zeppelin deployment steps.
:param DistConfig dist_config: The configuration container object needed.
"""
@classmethod
def factory(cls):
"""
Dynamically instantiate this or a subclass, to allow other layers
to override portions of this implementation.
"""
impl = layer.options('apache-zeppelin')['implementation_class']
module_name, cls_name = impl.rsplit('.', 1)
cls = getattr(import_module(module_name), cls_name)
return cls()
def __init__(self, dist_config=None):
self.dist_config = dist_config or utils.DistConfig()
self.resources = {
'zeppelin': 'zeppelin-%s' % utils.cpu_arch(),
}
def verify_resources(self):
try:
filename = hookenv.resource_get('zeppelin')
if not filename:
return False
if Path(filename).size == 0:
# work around charm store resource upload issue
# by falling-back to pulling from S3
raise NotImplementedError()
return True
except NotImplementedError:
if not jujuresources.resource_defined(self.resources['zeppelin']):
return False
return utils.verify_resources(*self.resources.values())()
def install(self, force=False):
'''
        Install Zeppelin and create the required directories. This method is to be called only once.
:param bool force: Force the execution of the installation even if this
is not the first installation attempt.
'''
destination = self.dist_config.path('zeppelin')
if not self.verify_resources():
return False
if destination.exists() and not force:
return True
try:
filename = hookenv.resource_get('zeppelin')
if not filename:
return False
if Path(filename).size == 0:
# work around charm store resource upload issue
# by falling-back to pulling from S3
raise NotImplementedError()
destination.rmtree_p() # if reinstalling
extracted = Path(fetch.install_remote('file://' + filename))
extracted.dirs()[0].copytree(destination) # only copy nested dir
except NotImplementedError:
if not jujuresources.resource_defined(self.resources['zeppelin']):
return False
if not utils.verify_resources(*self.resources.values())():
return False
jujuresources.install(self.resources['zeppelin'],
destination=destination,
skip_top_level=True)
self.dist_config.add_dirs()
self.dist_config.add_packages()
return True
def setup_zeppelin(self):
self.setup_zeppelin_config()
self.setup_init_scripts()
self.setup_zeppelin_tutorial()
def setup_zeppelin_config(self):
'''
        Copy the default configuration files into the 'zeppelin_conf' directory.
'''
default_conf = self.dist_config.path('zeppelin') / 'conf'
zeppelin_conf = self.dist_config.path('zeppelin_conf')
zeppelin_conf.rmtree_p()
default_conf.copytree(zeppelin_conf)
zeppelin_env = self.dist_config.path('zeppelin_conf') / 'zeppelin-env.sh'
if not zeppelin_env.exists():
(self.dist_config.path('zeppelin_conf') / 'zeppelin-env.sh.template').copy(zeppelin_env)
zeppelin_site = self.dist_config.path('zeppelin_conf') / 'zeppelin-site.xml'
if not zeppelin_site.exists():
(self.dist_config.path('zeppelin_conf') / 'zeppelin-site.xml.template').copy(zeppelin_site)
def setup_init_scripts(self):
if host.init_is_systemd():
template_path = '/etc/systemd/system/zeppelin.service'
template_name = 'systemd.conf'
else:
template_path = '/etc/init/zeppelin.conf'
template_name = 'upstart.conf'
if os.path.exists(template_path):
template_path_backup = "{}.backup".format(template_path)
if os.path.exists(template_path_backup):
os.remove(template_path_backup)
os.rename(template_path, template_path_backup)
render(
template_name,
template_path,
context={
'zeppelin_home': self.dist_config.path('zeppelin'),
'zeppelin_conf': self.dist_config.path('zeppelin_conf')
},
)
if host.init_is_systemd():
utils.run_as('root', 'systemctl', 'enable', 'zeppelin.service')
utils.run_as('root', 'systemctl', 'daemon-reload')
def setup_zeppelin_tutorial(self):
# The default zepp tutorial doesn't work with spark+hdfs (which is our
# default env). Include our own tutorial, which does work in a
# spark+hdfs env. Inspiration for this notebook came from here:
# https://github.com/apache/incubator-zeppelin/pull/46
notebook_dir = self.dist_config.path('zeppelin_notebooks')
dist_notebook_dir = self.dist_config.path('zeppelin') / 'notebook'
dist_tutorial_dir = dist_notebook_dir.dirs()[0]
notebook_dir.rmtree_p()
dist_tutorial_dir.move(notebook_dir)
self.copy_tutorial("hdfs-tutorial")
self.copy_tutorial("flume-tutorial")
dist_notebook_dir.rmtree_p()
# move the tutorial dir included in the tarball to our notebook dir and
# symlink that dir under our zeppelin home. we've seen issues where
# zepp doesn't honor ZEPPELIN_NOTEBOOK_DIR and instead looks for
# notebooks in ZEPPELIN_HOME/notebook.
notebook_dir.symlink(dist_notebook_dir)
# make sure the notebook dir's contents are owned by our user
cmd = "chown -R ubuntu:hadoop {}".format(notebook_dir)
call(cmd.split())
def copy_tutorial(self, tutorial_name):
tutorial_source = Path('resources/{}'.format(tutorial_name))
tutorial_source.copytree(self.dist_config.path('zeppelin_notebooks') / tutorial_name)
def configure_zeppelin(self):
'''
        Configure the Zeppelin environment for all users.
'''
zeppelin_bin = self.dist_config.path('zeppelin') / 'bin'
with utils.environment_edit_in_place('/etc/environment') as env:
if zeppelin_bin not in env['PATH']:
env['PATH'] = ':'.join([env['PATH'], zeppelin_bin])
env['ZEPPELIN_CONF_DIR'] = self.dist_config.path('zeppelin_conf')
zeppelin_site = self.dist_config.path('zeppelin_conf') / 'zeppelin-site.xml'
with utils.xmlpropmap_edit_in_place(zeppelin_site) as xml:
xml['zeppelin.server.port'] = self.dist_config.port('zeppelin')
xml['zeppelin.notebook.dir'] = self.dist_config.path('zeppelin_notebooks')
etc_env = utils.read_etc_env()
hadoop_conf_dir = etc_env.get('HADOOP_CONF_DIR', '/etc/hadoop/conf')
hadoop_extra_classpath = etc_env.get('HADOOP_EXTRA_CLASSPATH', '')
spark_home = etc_env.get('SPARK_HOME', '/usr/lib/spark')
spark_driver_mem = etc_env.get('SPARK_DRIVER_MEMORY', '1g')
spark_exe_mode = os.environ.get('MASTER', 'yarn-client')
spark_executor_mem = etc_env.get('SPARK_EXECUTOR_MEMORY', '1g')
zeppelin_env = self.dist_config.path('zeppelin_conf') / 'zeppelin-env.sh'
with open(zeppelin_env, "a") as f:
f.write('export ZEPPELIN_CLASSPATH_OVERRIDES={}\n'.format(hadoop_extra_classpath))
f.write('export ZEPPELIN_HOME={}\n'.format(self.dist_config.path('zeppelin')))
f.write('export ZEPPELIN_JAVA_OPTS="-Dspark.driver.memory={} -Dspark.executor.memory={}"\n'.format(
spark_driver_mem,
spark_executor_mem))
f.write('export ZEPPELIN_LOG_DIR={}\n'.format(self.dist_config.path('zeppelin_logs')))
f.write('export ZEPPELIN_MEM="-Xms128m -Xmx1024m -XX:MaxPermSize=512m"\n')
f.write('export ZEPPELIN_NOTEBOOK_DIR={}\n'.format(self.dist_config.path('zeppelin_notebooks')))
f.write('export SPARK_HOME={}\n'.format(spark_home))
f.write('export SPARK_SUBMIT_OPTIONS="--driver-memory {} --executor-memory {}"\n'.format(
spark_driver_mem,
spark_executor_mem))
f.write('export HADOOP_CONF_DIR={}\n'.format(hadoop_conf_dir))
f.write('export PYTHONPATH={s}/python:{s}/python/lib/py4j-0.8.2.1-src.zip\n'.format(s=spark_home))
f.write('export MASTER={}\n'.format(spark_exe_mode))
# User needs write access to zepp's conf to write interpreter.json
# on server start. chown the whole conf dir, though we could probably
# touch that file and chown it, leaving the rest owned as root:root.
# TODO: weigh implications of have zepp's conf dir owned by non-root.
cmd = "chown -R ubuntu:hadoop {}".format(self.dist_config.path('zeppelin_conf'))
call(cmd.split())
def update_master(self, master_url, master_ip):
api = ZeppelinAPI()
api.modify_interpreter('spark', properties={
'master': master_url,
})
self.restart()
def start(self):
        # Start if we're not already running. We currently don't have any
# runtime config options, so no need to restart when hooks fire.
if not utils.jps("zeppelin"):
host.service_start('zeppelin')
# wait up to 30s for server to start responding, lest API requests fail
self.wait_for_api(30)
def check_connect(self, addr, port):
try:
with socket.create_connection((addr, port), timeout=10):
return True
except OSError:
return False
def wait_for_api(self, timeout):
start = time.time()
while time.time() - start < timeout:
if self.check_connect('localhost', self.dist_config.port('zeppelin')):
return True
time.sleep(2)
raise utils.TimeoutError('Timed-out waiting for connection to Zeppelin')
def wait_for_stop(self, timeout):
start = time.time()
while utils.jps("zeppelin"):
time.sleep(1)
if time.time() - start > timeout:
raise utils.TimeoutError('Zeppelin did not stop')
def stop(self):
if utils.jps("zeppelin"):
host.service_stop('zeppelin')
# wait for the process to stop, since issuing a start while the
# process is still running (i.e., restart) could cause it to not
# start up again
self.wait_for_stop(30)
def restart(self):
self.stop()
self.start()
def open_ports(self):
for port in self.dist_config.exposed_ports('zeppelin'):
hookenv.open_port(port)
def close_ports(self):
for port in self.dist_config.exposed_ports('zeppelin'):
hookenv.close_port(port)
def cleanup(self):
self.dist_config.remove_dirs()
unitdata.kv().set('zeppelin.installed', False)
class ZeppelinAPI(object):
"""
    Helper for interacting with the Apache Zeppelin REST API.
"""
def _url(self, *parts):
dc = utils.DistConfig()
url = 'http://localhost:{}/api/'.format(dc.port('zeppelin'))
for part in parts:
url = urljoin(url, part)
return url
def import_notebook(self, contents):
response = requests.post(self._url('notebook'), data=contents)
if response.status_code != 201:
return None
return response.json()['body']
def delete_notebook(self, notebook_id):
requests.delete(self._url('notebook/', notebook_id))
def modify_interpreter(self, interpreter_name, properties):
response = requests.get(self._url('interpreter/', 'setting'))
try:
body = response.json()['body']
except json.JSONDecodeError:
hookenv.log('Invalid response from API server: {} {}'.format(response, response.text),
hookenv.ERROR)
raise
for interpreter_data in body:
if interpreter_data['name'] == interpreter_name:
break
else:
raise ValueError('Interpreter not found: {}'.format(interpreter_name))
interpreter_data['properties'].update(properties)
response = requests.put(self._url('interpreter/', 'setting/',
interpreter_data['id']),
data=json.dumps(interpreter_data))
if response.status_code != 200:
raise ValueError('Unable to update interpreter: {}'.format(response.text))
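# A minimal usage sketch for ZeppelinAPI (assumes a Zeppelin server is
# reachable on the configured port; the notebook JSON and interpreter
# properties below are illustrative, not taken from this charm):
#
#     api = ZeppelinAPI()
#     notebook_id = api.import_notebook('{"name": "demo", "paragraphs": []}')
#     api.modify_interpreter('spark', properties={'master': 'local[*]'})
#     if notebook_id:
#         api.delete_notebook(notebook_id)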
| {
"content_hash": "a101cc148da14681802acb4547c94e5d",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 111,
"avg_line_length": 41.57680250783699,
"alnum_prop": 0.6058961019377215,
"repo_name": "juju-solutions/layer-apache-zeppelin",
"id": "c79e3942fa56c61fec363953a8706bbc1f21ce56",
"size": "13263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/charms/layer/apache_zeppelin.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21129"
}
],
"symlink_target": ""
} |
"""Tests for google.protobuf.internal.service_reflection."""
__author__ = '[email protected] (Petar Petrov)'
from google.apputils import basetest
from google.protobuf import unittest_pb2
from google.protobuf import service_reflection
from google.protobuf import service
class FooUnitTest(basetest.TestCase):
def testService(self):
class MockRpcChannel(service.RpcChannel):
def CallMethod(self, method, controller, request, response, callback):
self.method = method
self.controller = controller
self.request = request
callback(response)
class MockRpcController(service.RpcController):
def SetFailed(self, msg):
self.failure_message = msg
self.callback_response = None
class MyService(unittest_pb2.TestService):
pass
self.callback_response = None
def MyCallback(response):
self.callback_response = response
rpc_controller = MockRpcController()
channel = MockRpcChannel()
srvc = MyService()
srvc.Foo(rpc_controller, unittest_pb2.FooRequest(), MyCallback)
self.assertEqual('Method Foo not implemented.',
rpc_controller.failure_message)
self.assertEqual(None, self.callback_response)
rpc_controller.failure_message = None
service_descriptor = unittest_pb2.TestService.GetDescriptor()
srvc.CallMethod(service_descriptor.methods[1], rpc_controller,
unittest_pb2.BarRequest(), MyCallback)
self.assertEqual('Method Bar not implemented.',
rpc_controller.failure_message)
self.assertEqual(None, self.callback_response)
class MyServiceImpl(unittest_pb2.TestService):
def Foo(self, rpc_controller, request, done):
self.foo_called = True
def Bar(self, rpc_controller, request, done):
self.bar_called = True
srvc = MyServiceImpl()
rpc_controller.failure_message = None
srvc.Foo(rpc_controller, unittest_pb2.FooRequest(), MyCallback)
self.assertEqual(None, rpc_controller.failure_message)
self.assertEqual(True, srvc.foo_called)
rpc_controller.failure_message = None
srvc.CallMethod(service_descriptor.methods[1], rpc_controller,
unittest_pb2.BarRequest(), MyCallback)
self.assertEqual(None, rpc_controller.failure_message)
self.assertEqual(True, srvc.bar_called)
def testServiceStub(self):
class MockRpcChannel(service.RpcChannel):
def CallMethod(self, method, controller, request,
response_class, callback):
self.method = method
self.controller = controller
self.request = request
callback(response_class())
self.callback_response = None
def MyCallback(response):
self.callback_response = response
channel = MockRpcChannel()
stub = unittest_pb2.TestService_Stub(channel)
rpc_controller = 'controller'
request = 'request'
# GetDescriptor now static, still works as instance method for compatibility
self.assertEqual(unittest_pb2.TestService_Stub.GetDescriptor(),
stub.GetDescriptor())
# Invoke method.
stub.Foo(rpc_controller, request, MyCallback)
self.assertTrue(isinstance(self.callback_response,
unittest_pb2.FooResponse))
self.assertEqual(request, channel.request)
self.assertEqual(rpc_controller, channel.controller)
self.assertEqual(stub.GetDescriptor().methods[0], channel.method)
if __name__ == '__main__':
basetest.main()
| {
"content_hash": "a0d7abd23135f54343dfb1e7dabc5357",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 80,
"avg_line_length": 33.74038461538461,
"alnum_prop": 0.6907950983186093,
"repo_name": "meghana0507/grpc-java-poll",
"id": "d066ae707bb7c53410c48aa0b919941461b8a47c",
"size": "5161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/netty/protobuf/python/google/protobuf/internal/service_reflection_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "675278"
},
{
"name": "C++",
"bytes": "13760110"
},
{
"name": "CMake",
"bytes": "18668"
},
{
"name": "CSS",
"bytes": "3570"
},
{
"name": "Emacs Lisp",
"bytes": "15596"
},
{
"name": "HTML",
"bytes": "7473"
},
{
"name": "Java",
"bytes": "15326237"
},
{
"name": "JavaScript",
"bytes": "2933"
},
{
"name": "Makefile",
"bytes": "590545"
},
{
"name": "Protocol Buffer",
"bytes": "1065012"
},
{
"name": "Python",
"bytes": "967273"
},
{
"name": "Ruby",
"bytes": "38278"
},
{
"name": "Shell",
"bytes": "1540526"
},
{
"name": "VimL",
"bytes": "7500"
}
],
"symlink_target": ""
} |
"""This code example updates contact addresses.
To determine which contacts exist, run get_all_contacts.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: ContactService.updateContacts
ContactService.getContactsByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
# Set the ID of the contact to update.
CONTACT_ID = 'INSERT_CONTACT_ID_HERE'
def main(client, contact_id):
# Initialize appropriate service.
contact_service = client.GetService('ContactService', version='v201405')
# Create statement object to select the single contact by ID.
values = [{
'key': 'id',
'value': {
'xsi_type': 'NumberValue',
'value': contact_id
}
}]
query = 'WHERE id = :id'
statement = dfp.FilterStatement(query, values, 1)
# Get contacts by statement.
response = contact_service.getContactsByStatement(
statement.ToStatement())
if 'results' in response:
updated_contacts = []
for contact in response['results']:
contact['address'] = '123 New Street, New York, NY, 10011'
updated_contacts.append(contact)
# Update the contact on the server.
contacts = contact_service.updateContacts(updated_contacts)
# Display results.
for contact in contacts:
print (('Contact with ID \'%s\', name \'%s\', and address \'%s\' '
'was updated.')
% (contact['id'], contact['name'], contact['address']))
else:
print 'No contacts found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, CONTACT_ID)
| {
"content_hash": "317a5a345ac56065dce19b6e30edf04b",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 77,
"avg_line_length": 30.3015873015873,
"alnum_prop": 0.676794133053955,
"repo_name": "dietrichc/streamline-ppc-reports",
"id": "1aa2b6a97972ab1c1172f9caa2dabdecf31d4403",
"size": "2527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/dfp/v201405/contact_service/update_contacts.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2235969"
}
],
"symlink_target": ""
} |
from wtforms import TextField, SelectMultipleField, SelectField
from wtforms.validators import DataRequired, NumberRange, Email
from ..datacenter import DatacenterCheckForm
class CheckForm(DatacenterCheckForm):
''' Monitor form for cloudflare traffic increase monitoring '''
title = "CloudFlare: Increase in Traffic"
description = """
    This monitor utilizes CloudFlare's zone analytics to detect an increase in HTTP requests. It can be used to detect changes in HTTP traffic and can be combined with scaling reactions. The threshold setting defines the percentage of change to trigger on; for example, a threshold of 50 triggers this monitor as True when web traffic increases by more than 50%.
"""
placeholders = DatacenterCheckForm.placeholders
return_choices = [
("true", "True"),
("false", "False")
]
start_choices = [
("-30", "1 minute"),
("-360", "15 minutes"),
("-720", "30 minutes"),
("-1440", "1 hour"),
("-10080", "1 day")
]
email = TextField(
"Email",
description=DatacenterCheckForm.descriptions['cloudflare']['email'],
validators=[Email(message='Email address invalid')])
domain = TextField(
"Domain",
description=DatacenterCheckForm.descriptions['domain'],
validators=[DataRequired(message='Domain is a required field')])
apikey = TextField(
"API Key",
description=DatacenterCheckForm.descriptions['apikey'],
validators=[DataRequired(message='API Key is a required field')])
threshold = TextField(
"Threshold",
description="""
        Define the percentage of change to trigger this monitor on. For example, if you wish this monitor to be True when traffic increases by 20%, set this threshold to 20
""",
validators=[DataRequired(message='Threshold is a required field'), NumberRange(min=1, message="Threshold must be a number between 1 - 100")])
start_time = SelectField(
"Time Span",
description=DatacenterCheckForm.descriptions['cloudflare']['timespan'],
choices=start_choices,
validators=[DataRequired(message="Time Span is a required field")])
return_value = SelectField(
"Return Value",
description=DatacenterCheckForm.descriptions['return_value'],
choices=return_choices,
validators=[DataRequired(message="Return Value is a required field")])
if __name__ == '__main__':
pass
| {
"content_hash": "13fe815ad71fc52d6421812834f4fac1",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 365,
"avg_line_length": 42.32203389830509,
"alnum_prop": 0.672006407689227,
"repo_name": "Runbook/runbook",
"id": "fd773006987149c9b0041db28e701c21bb48ca61",
"size": "2791",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/web/monitorforms/cloudflare-traffic-increase/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "17816"
},
{
"name": "HTML",
"bytes": "227999"
},
{
"name": "JavaScript",
"bytes": "4250"
},
{
"name": "Python",
"bytes": "754910"
},
{
"name": "Shell",
"bytes": "5859"
}
],
"symlink_target": ""
} |
"""
This module contains a helper function to fill erfa.astrom struct and a
ScienceState, which allows to speed up coordinate transformations at the
expense of accuracy.
"""
import warnings
import erfa
import numpy as np
import astropy.units as u
from astropy.time import Time
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.state import ScienceState
from .builtin_frames.utils import (
get_cip, get_jd12, get_polar_motion, pav2pv, prepare_earth_position_vel)
from .matrix_utilities import rotation_matrix
__all__ = []
class ErfaAstrom:
'''
The default provider for astrometry values.
A utility class to extract the necessary arguments for
erfa functions from frame attributes, call the corresponding
erfa functions and return the astrom object.
'''
@staticmethod
def apco(frame_or_coord):
'''
Wrapper for ``erfa.apco``, used in conversions AltAz <-> ICRS and CIRS <-> ICRS
Parameters
----------
frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
For this function, an AltAz or CIRS frame is expected.
'''
lon, lat, height = frame_or_coord.location.to_geodetic('WGS84')
obstime = frame_or_coord.obstime
jd1_tt, jd2_tt = get_jd12(obstime, 'tt')
xp, yp = get_polar_motion(obstime)
sp = erfa.sp00(jd1_tt, jd2_tt)
x, y, s = get_cip(jd1_tt, jd2_tt)
era = erfa.era00(*get_jd12(obstime, 'ut1'))
earth_pv, earth_heliocentric = prepare_earth_position_vel(obstime)
# refraction constants
if hasattr(frame_or_coord, 'pressure'):
# this is an AltAz like frame. Calculate refraction
refa, refb = erfa.refco(
frame_or_coord.pressure.to_value(u.hPa),
frame_or_coord.temperature.to_value(u.deg_C),
frame_or_coord.relative_humidity.value,
frame_or_coord.obswl.to_value(u.micron)
)
else:
# This is not an AltAz frame, so don't bother computing refraction
refa, refb = 0.0, 0.0
return erfa.apco(
jd1_tt, jd2_tt, earth_pv, earth_heliocentric, x, y, s, era,
lon.to_value(u.radian),
lat.to_value(u.radian),
height.to_value(u.m),
xp, yp, sp, refa, refb
)
@staticmethod
def apcs(frame_or_coord):
'''
Wrapper for ``erfa.apcs``, used in conversions GCRS <-> ICRS
Parameters
----------
frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
For this function, a GCRS frame is expected.
'''
jd1_tt, jd2_tt = get_jd12(frame_or_coord.obstime, 'tt')
obs_pv = pav2pv(
frame_or_coord.obsgeoloc.get_xyz(xyz_axis=-1).value,
frame_or_coord.obsgeovel.get_xyz(xyz_axis=-1).value
)
earth_pv, earth_heliocentric = prepare_earth_position_vel(frame_or_coord.obstime)
return erfa.apcs(jd1_tt, jd2_tt, obs_pv, earth_pv, earth_heliocentric)
@staticmethod
def apio(frame_or_coord):
'''
Slightly modified equivalent of ``erfa.apio``, used in conversions AltAz <-> CIRS.
Since we use a topocentric CIRS frame, we have dropped the steps needed to calculate
diurnal aberration.
Parameters
----------
frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
For this function, an AltAz frame is expected.
'''
# Calculate erfa.apio input parameters.
# TIO locator s'
sp = erfa.sp00(*get_jd12(frame_or_coord.obstime, 'tt'))
# Earth rotation angle.
theta = erfa.era00(*get_jd12(frame_or_coord.obstime, 'ut1'))
# Longitude and latitude in radians.
lon, lat, height = frame_or_coord.location.to_geodetic('WGS84')
elong = lon.to_value(u.radian)
phi = lat.to_value(u.radian)
# Polar motion, rotated onto local meridian
xp, yp = get_polar_motion(frame_or_coord.obstime)
# we need an empty astrom structure before we fill in the required sections
astrom = np.zeros(frame_or_coord.obstime.shape, dtype=erfa.dt_eraASTROM)
# Form the rotation matrix, CIRS to apparent [HA,Dec].
r = (rotation_matrix(elong, 'z', unit=u.radian)
@ rotation_matrix(-yp, 'x', unit=u.radian)
@ rotation_matrix(-xp, 'y', unit=u.radian)
@ rotation_matrix(theta+sp, 'z', unit=u.radian))
# Solve for local Earth rotation angle.
a = r[..., 0, 0]
b = r[..., 0, 1]
eral = np.arctan2(b, a)
astrom['eral'] = eral
# Solve for polar motion [X,Y] with respect to local meridian.
c = r[..., 0, 2]
astrom['xpl'] = np.arctan2(c, np.sqrt(a*a+b*b))
a = r[..., 1, 2]
b = r[..., 2, 2]
astrom['ypl'] = -np.arctan2(a, b)
# Adjusted longitude.
astrom['along'] = erfa.anpm(eral - theta)
# Functions of latitude.
astrom['sphi'] = np.sin(phi)
astrom['cphi'] = np.cos(phi)
# Omit two steps that are zero for a geocentric observer:
# Observer's geocentric position and velocity (m, m/s, CIRS).
# Magnitude of diurnal aberration vector.
# Refraction constants.
astrom['refa'], astrom['refb'] = erfa.refco(
frame_or_coord.pressure.to_value(u.hPa),
frame_or_coord.temperature.to_value(u.deg_C),
frame_or_coord.relative_humidity.value,
frame_or_coord.obswl.to_value(u.micron)
)
return astrom
class ErfaAstromInterpolator(ErfaAstrom):
'''
A provider for astrometry values that does not call erfa
for each individual timestamp but interpolates linearly
between support points.
For the interpolation, float64 MJD values are used, so time precision
for the interpolation will be around a microsecond.
This can dramatically speed up coordinate transformations,
e.g. between CIRS and ICRS,
when obstime is an array of many values (factors of 10 to > 100 depending
on the selected resolution, number of points and the time range of the values).
The precision of the transformation will still be in the order of microseconds
for reasonable values of time_resolution, e.g. ``300 * u.s``.
Users should benchmark performance and accuracy with the default transformation
for their specific use case and then choose a suitable ``time_resolution``
from there.
    This class is intended to be used together with the ``erfa_astrom`` science state,
e.g. in a context manager like this
Example
-------
>>> from astropy.coordinates import SkyCoord, CIRS
>>> from astropy.coordinates.erfa_astrom import erfa_astrom, ErfaAstromInterpolator
>>> import astropy.units as u
>>> from astropy.time import Time
>>> import numpy as np
>>> obstime = Time('2010-01-01T20:00:00') + np.linspace(0, 4, 1000) * u.hour
>>> crab = SkyCoord(ra='05h34m31.94s', dec='22d00m52.2s')
>>> with erfa_astrom.set(ErfaAstromInterpolator(300 * u.s)):
... cirs = crab.transform_to(CIRS(obstime=obstime))
'''
@u.quantity_input(time_resolution=u.day)
def __init__(self, time_resolution):
if time_resolution.to_value(u.us) < 10:
warnings.warn(
f'Using {self.__class__.__name__} with `time_resolution`'
' below 10 microseconds might lead to numerical inaccuracies'
                ' as the MJD-based interpolation is limited by floating point'
                ' precision to about a microsecond',
AstropyWarning
)
self.mjd_resolution = time_resolution.to_value(u.day)
def _get_support_points(self, obstime):
'''
Calculate support points for the interpolation.
We divide the MJD by the time resolution (as single float64 values),
and calculate ceil and floor.
Then we take the unique and sorted values and scale back to MJD.
This will create a sparse support for non-regular input obstimes.
'''
mjd_scaled = np.ravel(obstime.mjd / self.mjd_resolution)
# unique already does sorting
mjd_u = np.unique(np.concatenate([
np.floor(mjd_scaled),
np.ceil(mjd_scaled),
]))
return Time(
mjd_u * self.mjd_resolution,
format='mjd',
scale=obstime.scale,
)
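    # Worked example of the support-point construction (illustrative numbers):
    # with time_resolution = 0.5 day and obstime.mjd = [59000.1, 59000.4],
    # mjd_scaled = [118000.2, 118000.8]; floor/ceil give {118000.0, 118001.0},
    # so the support times are MJD 59000.0 and 59000.5.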
@staticmethod
def _prepare_earth_position_vel(support, obstime):
"""
Calculate Earth's position and velocity.
Uses the coarser grid ``support`` to do the calculation, and interpolates
onto the finer grid ``obstime``.
"""
pv_support, heliocentric_support = prepare_earth_position_vel(support)
# do interpolation
earth_pv = np.empty(obstime.shape, dtype=erfa.dt_pv)
earth_heliocentric = np.empty(obstime.shape + (3,))
for dim in range(3):
for key in 'pv':
earth_pv[key][..., dim] = np.interp(
obstime.mjd,
support.mjd,
pv_support[key][..., dim]
)
earth_heliocentric[..., dim] = np.interp(
obstime.mjd, support.mjd, heliocentric_support[..., dim]
)
return earth_pv, earth_heliocentric
@staticmethod
def _get_c2i(support, obstime):
"""
Calculate the Celestial-to-Intermediate rotation matrix.
Uses the coarser grid ``support`` to do the calculation, and interpolates
onto the finer grid ``obstime``.
"""
jd1_tt_support, jd2_tt_support = get_jd12(support, 'tt')
c2i_support = erfa.c2i06a(jd1_tt_support, jd2_tt_support)
c2i = np.empty(obstime.shape + (3, 3))
for dim1 in range(3):
for dim2 in range(3):
c2i[..., dim1, dim2] = np.interp(obstime.mjd, support.mjd, c2i_support[..., dim1, dim2])
return c2i
@staticmethod
def _get_cip(support, obstime):
"""
Find the X, Y coordinates of the CIP and the CIO locator, s.
Uses the coarser grid ``support`` to do the calculation, and interpolates
onto the finer grid ``obstime``.
"""
jd1_tt_support, jd2_tt_support = get_jd12(support, 'tt')
cip_support = get_cip(jd1_tt_support, jd2_tt_support)
return tuple(
np.interp(obstime.mjd, support.mjd, cip_component)
for cip_component in cip_support
)
@staticmethod
def _get_polar_motion(support, obstime):
"""
Find the two polar motion components in radians
Uses the coarser grid ``support`` to do the calculation, and interpolates
onto the finer grid ``obstime``.
"""
polar_motion_support = get_polar_motion(support)
return tuple(
np.interp(obstime.mjd, support.mjd, polar_motion_component)
for polar_motion_component in polar_motion_support
)
def apco(self, frame_or_coord):
'''
Wrapper for ``erfa.apco``, used in conversions AltAz <-> ICRS and CIRS <-> ICRS
Parameters
----------
frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
For this function, an AltAz or CIRS frame is expected.
'''
lon, lat, height = frame_or_coord.location.to_geodetic('WGS84')
obstime = frame_or_coord.obstime
support = self._get_support_points(obstime)
jd1_tt, jd2_tt = get_jd12(obstime, 'tt')
# get the position and velocity arrays for the observatory. Need to
# have xyz in last dimension, and pos/vel in one-but-last.
earth_pv, earth_heliocentric = self._prepare_earth_position_vel(support, obstime)
xp, yp = self._get_polar_motion(support, obstime)
sp = erfa.sp00(jd1_tt, jd2_tt)
x, y, s = self._get_cip(support, obstime)
era = erfa.era00(*get_jd12(obstime, 'ut1'))
# refraction constants
if hasattr(frame_or_coord, 'pressure'):
# an AltAz like frame. Include refraction
refa, refb = erfa.refco(
frame_or_coord.pressure.to_value(u.hPa),
frame_or_coord.temperature.to_value(u.deg_C),
frame_or_coord.relative_humidity.value,
frame_or_coord.obswl.to_value(u.micron)
)
else:
# a CIRS like frame - no refraction
refa, refb = 0.0, 0.0
return erfa.apco(
jd1_tt, jd2_tt, earth_pv, earth_heliocentric, x, y, s, era,
lon.to_value(u.radian),
lat.to_value(u.radian),
height.to_value(u.m),
xp, yp, sp, refa, refb
)
def apcs(self, frame_or_coord):
'''
        Wrapper for ``erfa.apcs``, used in conversions GCRS <-> ICRS
Parameters
----------
frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
For this function, a GCRS frame is expected.
'''
obstime = frame_or_coord.obstime
support = self._get_support_points(obstime)
# get the position and velocity arrays for the observatory. Need to
# have xyz in last dimension, and pos/vel in one-but-last.
earth_pv, earth_heliocentric = self._prepare_earth_position_vel(support, obstime)
pv = pav2pv(
frame_or_coord.obsgeoloc.get_xyz(xyz_axis=-1).value,
frame_or_coord.obsgeovel.get_xyz(xyz_axis=-1).value
)
jd1_tt, jd2_tt = get_jd12(obstime, 'tt')
return erfa.apcs(jd1_tt, jd2_tt, pv, earth_pv, earth_heliocentric)
class erfa_astrom(ScienceState):
"""
    ScienceState to select which astrom provider is used in
coordinate transformations.
"""
_value = ErfaAstrom()
@classmethod
def validate(cls, value):
if not isinstance(value, ErfaAstrom):
raise TypeError(f'Must be an instance of {ErfaAstrom!r}')
return value
| {
"content_hash": "b0d4c78c714659388172b32cc667ae66",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 104,
"avg_line_length": 38.03045685279188,
"alnum_prop": 0.6074479444741057,
"repo_name": "lpsinger/astropy",
"id": "df217a8f14492a78af3598eca974e13da31ca49c",
"size": "15048",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "astropy/coordinates/erfa_astrom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040074"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78755"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12323563"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
import unittest
from cassandra import OperationTimedOut, WriteTimeout
from cassandra.cluster import Cluster, ExecutionProfile, ResponseFuture, EXEC_PROFILE_DEFAULT, NoHostAvailable
from cassandra.query import SimpleStatement
from cassandra.policies import ConstantSpeculativeExecutionPolicy, RoundRobinPolicy, RetryPolicy, WriteType
from cassandra.protocol import OverloadedErrorMessage, IsBootstrappingErrorMessage, TruncateError, ServerError
from tests.integration import greaterthancass21, requiressimulacron, SIMULACRON_JAR, \
CASSANDRA_VERSION
from tests.integration.simulacron import PROTOCOL_VERSION
from tests.integration.simulacron.utils import start_and_prime_singledc, prime_query, \
stop_simulacron, NO_THEN, clear_queries
from itertools import count
from packaging.version import Version
class BadRoundRobinPolicy(RoundRobinPolicy):
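    # Returns each live host ten times so that speculative executions in
    # these tests never exhaust the query plan.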
def make_query_plan(self, working_keyspace=None, query=None):
pos = self._position
self._position += 1
hosts = []
for _ in range(10):
hosts.extend(self._live_hosts)
return hosts
# This doesn't work well with Windows clock granularity
@requiressimulacron
class SpecExecTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
if SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"):
return
start_and_prime_singledc()
cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False)
cls.session = cls.cluster.connect(wait_for_all_pools=True)
spec_ep_brr = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(),
speculative_execution_policy=ConstantSpeculativeExecutionPolicy(1, 6),
request_timeout=12)
spec_ep_rr = ExecutionProfile(speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.5, 10),
request_timeout=12)
spec_ep_rr_lim = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(),
speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0.5, 1),
request_timeout=12)
spec_ep_brr_lim = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(),
speculative_execution_policy=ConstantSpeculativeExecutionPolicy(4, 10))
cls.cluster.add_execution_profile("spec_ep_brr", spec_ep_brr)
cls.cluster.add_execution_profile("spec_ep_rr", spec_ep_rr)
cls.cluster.add_execution_profile("spec_ep_rr_lim", spec_ep_rr_lim)
cls.cluster.add_execution_profile("spec_ep_brr_lim", spec_ep_brr_lim)
@classmethod
def tearDownClass(cls):
if SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"):
return
cls.cluster.shutdown()
stop_simulacron()
def tearDown(self):
clear_queries()
@greaterthancass21
def test_speculative_execution(self):
"""
        Test to ensure that speculative execution honors the LBP and retries appropriately.
        This test uses various LBP and ConstantSpeculativeExecutionPolicy settings and ensures the proper number of hosts is queried
@since 3.7.0
@jira_ticket PYTHON-218
        @expected_result speculative retries should honor max retries, the idempotent state of queries, and the underlying LBP.
@test_category metadata
"""
query_to_prime = "INSERT INTO test3rf.test (k, v) VALUES (0, 1);"
prime_query(query_to_prime, then={"delay_in_ms": 10000})
statement = SimpleStatement(query_to_prime, is_idempotent=True)
statement_non_idem = SimpleStatement(query_to_prime, is_idempotent=False)
# This LBP should repeat hosts up to around 30
result = self.session.execute(statement, execution_profile='spec_ep_brr')
self.assertEqual(7, len(result.response_future.attempted_hosts))
# This LBP should keep host list to 3
result = self.session.execute(statement, execution_profile='spec_ep_rr')
self.assertEqual(3, len(result.response_future.attempted_hosts))
# Spec_execution policy should limit retries to 1
result = self.session.execute(statement, execution_profile='spec_ep_rr_lim')
self.assertEqual(2, len(result.response_future.attempted_hosts))
# Spec_execution policy should not be used if the query is not idempotent
result = self.session.execute(statement_non_idem, execution_profile='spec_ep_brr')
self.assertEqual(1, len(result.response_future.attempted_hosts))
# Default policy with non_idem query
result = self.session.execute(statement_non_idem, timeout=12)
self.assertEqual(1, len(result.response_future.attempted_hosts))
# Should be able to run an idempotent query against default execution policy with no speculative_execution_policy
result = self.session.execute(statement, timeout=12)
self.assertEqual(1, len(result.response_future.attempted_hosts))
# Test timeout with spec_ex
with self.assertRaises(OperationTimedOut):
self.session.execute(statement, execution_profile='spec_ep_rr', timeout=.5)
prepared_query_to_prime = "SELECT * FROM test3rf.test where k = ?"
when = {"params": {"k": "0"}, "param_types": {"k": "ascii"}}
prime_query(prepared_query_to_prime, when=when, then={"delay_in_ms": 4000})
# PYTHON-736 Test speculation policy works with a prepared statement
prepared_statement = self.session.prepare(prepared_query_to_prime)
# non-idempotent
result = self.session.execute(prepared_statement, ("0",), execution_profile='spec_ep_brr')
self.assertEqual(1, len(result.response_future.attempted_hosts))
# idempotent
prepared_statement.is_idempotent = True
result = self.session.execute(prepared_statement, ("0",), execution_profile='spec_ep_brr')
self.assertLess(1, len(result.response_future.attempted_hosts))
def test_speculative_and_timeout(self):
"""
Test to ensure the timeout is honored when using speculative execution
@since 3.10
@jira_ticket PYTHON-750
        @expected_result speculative retries are scheduled at a fixed period, up to the maximum
        period of the timeout.
@test_category metadata
"""
query_to_prime = "INSERT INTO testkeyspace.testtable (k, v) VALUES (0, 1);"
prime_query(query_to_prime, then=NO_THEN)
statement = SimpleStatement(query_to_prime, is_idempotent=True)
        # An OperationTimedOut is placed in the response_future here;
        # that's why we can't call session.execute, which would raise it, and
        # instead we wait directly on the event
response_future = self.session.execute_async(statement, execution_profile='spec_ep_brr_lim',
timeout=14)
response_future._event.wait(16)
self.assertIsInstance(response_future._final_exception, OperationTimedOut)
        # This is because floor(14 / 4) + 1 = 4 attempts fit within the timeout
self.assertEqual(len(response_future.attempted_hosts), 4)
def test_delay_can_be_0(self):
"""
Test to validate that the delay can be zero for the ConstantSpeculativeExecutionPolicy
@since 3.13
@jira_ticket PYTHON-836
@expected_result all the queries are executed immediately
@test_category policy
"""
query_to_prime = "INSERT INTO madeup_keyspace.madeup_table(k, v) VALUES (1, 2)"
prime_query(query_to_prime, then={"delay_in_ms": 5000})
number_of_requests = 4
spec = ExecutionProfile(load_balancing_policy=RoundRobinPolicy(),
speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0, number_of_requests))
cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False)
cluster.add_execution_profile("spec", spec)
session = cluster.connect(wait_for_all_pools=True)
self.addCleanup(cluster.shutdown)
counter = count()
def patch_and_count(f):
def patched(*args, **kwargs):
next(counter)
f(*args, **kwargs)
return patched
self.addCleanup(setattr, ResponseFuture, "send_request", ResponseFuture.send_request)
ResponseFuture.send_request = patch_and_count(ResponseFuture.send_request)
stmt = SimpleStatement(query_to_prime)
stmt.is_idempotent = True
results = session.execute(stmt, execution_profile="spec")
self.assertEqual(len(results.response_future.attempted_hosts), 3)
# send_request is called number_of_requests times for the speculative request
# plus one for the call from the main thread.
self.assertEqual(next(counter), number_of_requests + 1)
class CustomRetryPolicy(RetryPolicy):
def on_write_timeout(self, query, consistency, write_type,
required_responses, received_responses, retry_num):
if retry_num != 0:
return self.RETHROW, None
elif write_type == WriteType.SIMPLE:
return self.RETHROW, None
elif write_type == WriteType.CDC:
return self.IGNORE, None
class CounterRetryPolicy(RetryPolicy):
def __init__(self):
self.write_timeout = count()
self.read_timeout = count()
self.unavailable = count()
self.request_error = count()
def on_read_timeout(self, query, consistency, required_responses,
received_responses, data_retrieved, retry_num):
next(self.read_timeout)
return self.IGNORE, None
def on_write_timeout(self, query, consistency, write_type,
required_responses, received_responses, retry_num):
next(self.write_timeout)
return self.IGNORE, None
def on_unavailable(self, query, consistency, required_replicas, alive_replicas, retry_num):
next(self.unavailable)
return self.IGNORE, None
def on_request_error(self, query, consistency, error, retry_num):
next(self.request_error)
return self.RETHROW, None
def reset_counters(self):
self.write_timeout = count()
self.read_timeout = count()
self.unavailable = count()
self.request_error = count()
@requiressimulacron
class RetryPolicyTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
if SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"):
return
start_and_prime_singledc()
@classmethod
def tearDownClass(cls):
if SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"):
return
stop_simulacron()
def tearDown(self):
clear_queries()
def set_cluster(self, retry_policy):
self.cluster = Cluster(
protocol_version=PROTOCOL_VERSION,
compression=False,
execution_profiles={
EXEC_PROFILE_DEFAULT: ExecutionProfile(retry_policy=retry_policy)
},
)
self.session = self.cluster.connect(wait_for_all_pools=True)
self.addCleanup(self.cluster.shutdown)
def test_retry_policy_ignores_and_rethrows(self):
"""
Test to verify :class:`~cassandra.protocol.WriteTimeoutErrorMessage` is decoded correctly and that
:attr:`.~cassandra.policies.RetryPolicy.RETHROW` and
:attr:`.~cassandra.policies.RetryPolicy.IGNORE` are respected
@since 3.12
@jira_ticket PYTHON-812
@expected_result the retry policy functions as expected
@test_category connection
"""
self.set_cluster(CustomRetryPolicy())
query_to_prime_simple = "SELECT * from simulacron_keyspace.simple"
query_to_prime_cdc = "SELECT * from simulacron_keyspace.cdc"
then = {
"result": "write_timeout",
"delay_in_ms": 0,
"consistency_level": "LOCAL_QUORUM",
"received": 1,
"block_for": 2,
"write_type": "SIMPLE",
"ignore_on_prepare": True
}
prime_query(query_to_prime_simple, rows=None, column_types=None, then=then)
then["write_type"] = "CDC"
prime_query(query_to_prime_cdc, rows=None, column_types=None, then=then)
with self.assertRaises(WriteTimeout):
self.session.execute(query_to_prime_simple)
        # CDC write timeouts should be ignored by this policy
self.session.execute(query_to_prime_cdc)
def test_retry_policy_with_prepared(self):
"""
Test to verify that the retry policy is called as expected
for bound and prepared statements when set at the cluster level
@since 3.13
@jira_ticket PYTHON-861
@expected_result the appropriate retry policy is called
@test_category connection
"""
counter_policy = CounterRetryPolicy()
self.set_cluster(counter_policy)
query_to_prime = "SELECT * from simulacron_keyspace.simulacron_table"
then = {
"result": "write_timeout",
"delay_in_ms": 0,
"consistency_level": "LOCAL_QUORUM",
"received": 1,
"block_for": 2,
"write_type": "SIMPLE",
"ignore_on_prepare": True
}
prime_query(query_to_prime, then=then, rows=None, column_types=None)
self.session.execute(query_to_prime)
self.assertEqual(next(counter_policy.write_timeout), 1)
counter_policy.reset_counters()
query_to_prime_prepared = "SELECT * from simulacron_keyspace.simulacron_table WHERE key = :key"
when = {"params": {"key": "0"}, "param_types": {"key": "ascii"}}
prime_query(query_to_prime_prepared, when=when, then=then, rows=None, column_types=None)
prepared_stmt = self.session.prepare(query_to_prime_prepared)
bound_stm = prepared_stmt.bind({"key": "0"})
self.session.execute(bound_stm)
self.assertEqual(next(counter_policy.write_timeout), 1)
counter_policy.reset_counters()
self.session.execute(prepared_stmt, ("0",))
self.assertEqual(next(counter_policy.write_timeout), 1)
def test_setting_retry_policy_to_statement(self):
"""
Test to verify that the retry policy is called as expected
for bound and prepared statements when set to the prepared statement
@since 3.13
@jira_ticket PYTHON-861
@expected_result the appropriate retry policy is called
@test_category connection
"""
retry_policy = RetryPolicy()
self.set_cluster(retry_policy)
then = {
"result": "write_timeout",
"delay_in_ms": 0,
"consistency_level": "LOCAL_QUORUM",
"received": 1,
"block_for": 2,
"write_type": "SIMPLE",
"ignore_on_prepare": True
}
query_to_prime_prepared = "SELECT * from simulacron_keyspace.simulacron_table WHERE key = :key"
when = {"params": {"key": "0"}, "param_types": {"key": "ascii"}}
prime_query(query_to_prime_prepared, when=when, then=then, rows=None, column_types=None)
counter_policy = CounterRetryPolicy()
prepared_stmt = self.session.prepare(query_to_prime_prepared)
prepared_stmt.retry_policy = counter_policy
self.session.execute(prepared_stmt, ("0",))
self.assertEqual(next(counter_policy.write_timeout), 1)
counter_policy.reset_counters()
bound_stmt = prepared_stmt.bind({"key": "0"})
bound_stmt.retry_policy = counter_policy
self.session.execute(bound_stmt)
self.assertEqual(next(counter_policy.write_timeout), 1)
def test_retry_policy_on_request_error(self):
"""
Test to verify that on_request_error is called properly.
@since 3.18
@jira_ticket PYTHON-1064
@expected_result the appropriate retry policy is called
@test_category connection
"""
overloaded_error = {
"result": "overloaded",
"message": "overloaded"
}
bootstrapping_error = {
"result": "is_bootstrapping",
"message": "isbootstrapping"
}
truncate_error = {
"result": "truncate_error",
"message": "truncate_error"
}
server_error = {
"result": "server_error",
"message": "server_error"
}
# Test the on_request_error call
retry_policy = CounterRetryPolicy()
self.set_cluster(retry_policy)
for prime_error, exc in [
(overloaded_error, OverloadedErrorMessage),
(bootstrapping_error, IsBootstrappingErrorMessage),
(truncate_error, TruncateError),
(server_error, ServerError)]:
clear_queries()
query_to_prime = "SELECT * from simulacron_keyspace.simulacron_table;"
prime_query(query_to_prime, then=prime_error, rows=None, column_types=None)
rf = self.session.execute_async(query_to_prime)
with self.assertRaises(exc):
rf.result()
self.assertEqual(len(rf.attempted_hosts), 1) # no retry
self.assertEqual(next(retry_policy.request_error), 4)
# Test that by default, retry on next host
retry_policy = RetryPolicy()
self.set_cluster(retry_policy)
for e in [overloaded_error, bootstrapping_error, truncate_error, server_error]:
clear_queries()
query_to_prime = "SELECT * from simulacron_keyspace.simulacron_table;"
prime_query(query_to_prime, then=e, rows=None, column_types=None)
rf = self.session.execute_async(query_to_prime)
with self.assertRaises(NoHostAvailable):
rf.result()
self.assertEqual(len(rf.attempted_hosts), 3) # all 3 nodes failed
| {
"content_hash": "7dd927aaf64f2bcae1dffbfe3c0fd08c",
"timestamp": "",
"source": "github",
"line_count": 448,
"max_line_length": 137,
"avg_line_length": 40.44642857142857,
"alnum_prop": 0.6374172185430463,
"repo_name": "datastax/python-driver",
"id": "6d0d081889ad50f85865a8a6d1e72b7c283b1b72",
"size": "18689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/simulacron/test_policies.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28914"
},
{
"name": "Cython",
"bytes": "51225"
},
{
"name": "Groovy",
"bytes": "41012"
},
{
"name": "PowerShell",
"bytes": "5631"
},
{
"name": "Python",
"bytes": "3219458"
}
],
"symlink_target": ""
} |
__all__ = ["ProcessResponse"]
| {
"content_hash": "b61da6d6688f85614a20627de05afb39",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 29,
"avg_line_length": 30,
"alnum_prop": 0.6,
"repo_name": "MaxMorgenstern/EmeraldAI",
"id": "d4523331b3b70ba369e09661a77c2fa548a0a29e",
"size": "30",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "EmeraldAI/Pipelines/ResponseProcessing/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "133"
},
{
"name": "C",
"bytes": "34040"
},
{
"name": "C++",
"bytes": "238832"
},
{
"name": "Java",
"bytes": "69620"
},
{
"name": "Python",
"bytes": "287448"
},
{
"name": "Shell",
"bytes": "11298"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from ast import parse
import os
from setuptools import setup
from sys import version_info
def version():
"""Return version string from pattern/__init__.py."""
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)),
'pattern',
'__init__.py')) as input_file:
for line in input_file:
if line.startswith('__version__'):
return parse(line).body[0].value.s
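# e.g. for a line "__version__ = '2.6'", parse(line).body[0].value.s yields
# '2.6', so the version is read without importing the package at build time.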
install_requires = ["beautifulsoup4",
"cherrypy",
"docx",
"feedparser",
"pdfminer" if version_info[0] == 2 else "pdfminer3k",
"simplejson"]
setup(
name="pattern",
version=version(),
description="Web mining module for Python.",
license="BSD",
author="Tom De Smedt",
author_email="[email protected]",
url="http://www.clips.ua.ac.be/pages/pattern",
packages=["pattern",
"pattern.web",
"pattern.web.cache",
"pattern.web.imap",
"pattern.web.locale",
"pattern.web.oauth",
"pattern.db",
"pattern.text",
"pattern.text.de",
"pattern.text.en",
"pattern.text.en.wordlist",
"pattern.text.en.wordnet",
"pattern.text.en.wordnet.pywordnet",
"pattern.text.es",
"pattern.text.fr",
"pattern.text.it",
"pattern.text.nl",
"pattern.vector",
"pattern.vector.svm",
"pattern.graph",
"pattern.server"
],
package_data={"pattern": ["*.js"],
"pattern.web.cache": ["tmp/*"],
"pattern.web.locale": ["__init__.py"],
"pattern.text.de": ["*.txt", "*.xml"],
"pattern.text.en": ["*.txt", "*.xml", "*.slp"],
"pattern.text.en.wordlist": ["*.txt"],
"pattern.text.en.wordnet": ["*.txt", "dict/*"],
"pattern.text.en.wordnet.pywordnet": ["*.py"],
"pattern.text.es": ["*.txt", "*.xml"],
"pattern.text.fr": ["*.txt", "*.xml"],
"pattern.text.it": ["*.txt", "*.xml"],
"pattern.text.nl": ["*.txt", "*.xml"],
"pattern.vector": ["*.txt"],
"pattern.vector.svm": ["*.txt", "libsvm-3.11/*", "libsvm-3.17/*", "liblinear-1.93/*"],
"pattern.graph": ["*.js", "*.csv"],
"pattern.server": ["static/*"],
},
py_modules=["pattern.metrics",
"pattern.text.search",
"pattern.text.tree"
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Natural Language :: Dutch",
"Natural Language :: English",
"Natural Language :: French",
"Natural Language :: German",
"Natural Language :: Italian",
"Natural Language :: Spanish",
"Operating System :: OS Independent",
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
"Programming Language :: JavaScript",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP :: Indexing/Search",
"Topic :: Multimedia :: Graphics",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Linguistic",
"Topic :: Text Processing :: Markup :: HTML"
],
zip_safe=True,
install_requires=install_requires
)
| {
"content_hash": "ab8d8fa6231027ba6acdba35c6baa7ab",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 104,
"avg_line_length": 38.90909090909091,
"alnum_prop": 0.5021028037383177,
"repo_name": "shubhangiKishore/pattern",
"id": "5199dd2ce28c190711fb5bc16b35c9cd138ec096",
"size": "4280",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "606"
},
{
"name": "JavaScript",
"bytes": "228129"
},
{
"name": "Python",
"bytes": "1553353"
}
],
"symlink_target": ""
} |
""" erppeek.py -- Odoo / OpenERP client library and command line tool
Author: Florent Xicluna
(derived from a script by Alan Bell)
"""
import _ast
import atexit
import csv
import functools
import optparse
import os
import re
import shlex
import sys
import time
import traceback
try: # Python 3
import configparser
from threading import current_thread
from xmlrpc.client import Fault, ServerProxy, MININT, MAXINT
PY2 = False
except ImportError: # Python 2
import ConfigParser as configparser
from threading import currentThread as current_thread
from xmlrpclib import Fault, ServerProxy, MININT, MAXINT
PY2 = True
__version__ = '1.6.3'
__all__ = ['Client', 'Model', 'Record', 'RecordList', 'Service',
'format_exception', 'read_config', 'start_odoo_services']
CONF_FILE = 'erppeek.ini'
HIST_FILE = os.path.expanduser('~/.erppeek_history')
DEFAULT_URL = 'http://localhost:8069'
DEFAULT_DB = 'openerp'
DEFAULT_USER = 'admin'
MAXCOL = [79, 179, 9999] # Line length in verbose mode
USAGE = """\
Usage (some commands):
models(name) # List models matching pattern
model(name) # Return a Model instance
model(name).keys() # List field names of the model
model(name).fields(names=None) # Return details for the fields
model(name).field(name) # Return details for the field
model(name).browse(domain)
model(name).browse(domain, offset=0, limit=None, order=None)
# Return a RecordList
rec = model(name).get(domain) # Get the Record matching domain
rec.some_field # Return the value of this field
rec.read(fields=None) # Return values for the fields
client.login(user) # Login with another user
client.connect(env) # Connect to another env.
client.modules(name) # List modules matching pattern
client.upgrade(module1, module2, ...)
# Upgrade the modules
"""
DOMAIN_OPERATORS = frozenset('!|&')
# Supported operators are:
# =, !=, >, >=, <, <=, like, ilike, in, not like, not ilike, not in,
# child_of, =like, =ilike, =?
_term_re = re.compile(
    r'([\w._]+)\s*' r'(=(?:like|ilike|\?)|[<>]=?|!?=(?!=)'
    r'|(?<= )(?:like|ilike|in|not like|not ilike|not in|child_of))' r'\s*(.*)')
_fields_re = re.compile(r'(?:[^%]|^)%\(([^)]+)\)')
# Published object methods
_methods = {
'db': ['create_database', 'duplicate_database', 'db_exist',
'drop', 'dump', 'restore', 'rename', 'list', 'list_lang',
'change_admin_password', 'server_version', 'migrate_databases'],
'common': ['about', 'login', 'timezone_get',
'authenticate', 'version', 'set_loglevel'],
'object': ['execute', 'execute_kw', 'exec_workflow'],
'report': ['render_report', 'report', 'report_get'],
}
# New 6.1: (db) create_database db_exist,
# (common) authenticate version set_loglevel
# (object) execute_kw, (report) render_report
# New 7.0: (db) duplicate_database
_obsolete_methods = {
'db': ['create', 'get_progress'], # < 8.0
'common': ['check_connectivity', 'get_available_updates', 'get_os_time',
'get_migration_scripts', 'get_server_environment',
'get_sqlcount', 'get_stats',
'list_http_services', 'login_message'], # < 8.0
'wizard': ['execute', 'create'], # < 7.0
}
_cause_message = ("\nThe above exception was the direct cause "
"of the following exception:\n\n")
_pending_state = ('state', 'not in',
['uninstallable', 'uninstalled', 'installed'])
if PY2:
int_types = int, long
class _DictWriter(csv.DictWriter):
"""Unicode CSV Writer, which encodes output to UTF-8."""
def writeheader(self):
# Method 'writeheader' does not exist in Python 2.6
header = dict(zip(self.fieldnames, self.fieldnames))
self.writerow(header)
def _dict_to_list(self, rowdict):
rowlst = csv.DictWriter._dict_to_list(self, rowdict)
return [cell.encode('utf-8') if hasattr(cell, 'encode') else cell
for cell in rowlst]
else: # Python 3
basestring = str
int_types = int
_DictWriter = csv.DictWriter
def _memoize(inst, attr, value, doc_values=None):
if hasattr(value, '__get__') and not hasattr(value, '__self__'):
value.__name__ = attr
if doc_values is not None:
value.__doc__ %= doc_values
value = value.__get__(inst, type(inst))
inst.__dict__[attr] = value
return value
# Simplified ast.literal_eval which does not parse operators
def _convert(node, _consts={'None': None, 'True': True, 'False': False}):
if isinstance(node, _ast.Str):
return node.s
if isinstance(node, _ast.Num):
return node.n
if isinstance(node, _ast.Tuple):
return tuple(map(_convert, node.elts))
if isinstance(node, _ast.List):
return list(map(_convert, node.elts))
if isinstance(node, _ast.Dict):
return dict([(_convert(k), _convert(v))
for (k, v) in zip(node.keys, node.values)])
if hasattr(node, 'value') and str(node.value) in _consts:
return node.value # Python 3.4+
if isinstance(node, _ast.Name) and node.id in _consts:
return _consts[node.id] # Python <= 3.3
raise ValueError('malformed or disallowed expression')
def literal_eval(expression, _octal_digits=frozenset('01234567')):
node = compile(expression, '<unknown>', 'eval', _ast.PyCF_ONLY_AST)
if expression[:1] == '0' and expression[1:2] in _octal_digits:
raise SyntaxError('unsupported octal notation')
value = _convert(node.body)
if isinstance(value, int_types) and not MININT <= value <= MAXINT:
raise ValueError('overflow, int exceeds XML-RPC limits')
return value
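# Illustrative examples (not part of the original source):
#   literal_eval("{'a': (1, True)}")  ->  {'a': (1, True)}
#   literal_eval('0777')              raises SyntaxError (octal rejected)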
def is_list_of_dict(iterator):
"""Return True if the first non-false item is a dict."""
for item in iterator:
if item:
return isinstance(item, dict)
return False
def mixedcase(s, _cache={}):
"""Convert to MixedCase.
>>> mixedcase('res.company')
'ResCompany'
"""
try:
return _cache[s]
except KeyError:
_cache[s] = s = ''.join([w.capitalize() for w in s.split('.')])
return s
def lowercase(s, _sub=re.compile('[A-Z]').sub,
_repl=(lambda m: '.' + m.group(0).lower()), _cache={}):
"""Convert to lowercase with dots.
>>> lowercase('ResCompany')
'res.company'
"""
try:
return _cache[s]
except KeyError:
_cache[s] = s = _sub(_repl, s).lstrip('.')
return s
def format_exception(exc_type, exc, tb, limit=None, chain=True,
_format_exception=traceback.format_exception):
"""Format a stack trace and the exception information.
This wrapper is a replacement of ``traceback.format_exception``
which formats the error and traceback received by XML-RPC.
If `chain` is True, then the original exception is printed too.
"""
values = _format_exception(exc_type, exc, tb, limit=limit)
if issubclass(exc_type, Error):
values = [str(exc) + '\n']
elif ((issubclass(exc_type, Fault) and
isinstance(exc.faultCode, basestring))):
# Format readable 'Fault' errors
(etype, __, msg) = exc.faultCode.partition('--')
server_tb = None
if etype.strip() != 'warning':
msg = exc.faultCode
if not msg.startswith('FATAL:'):
server_tb = exc.faultString
fault = '%s: %s\n' % (exc_type.__name__, msg.strip())
if chain:
values = [server_tb or fault, _cause_message] + values
values[-1] = fault
else:
values = [server_tb or fault]
return values
def read_config(section=None):
"""Read the environment settings from the configuration file.
The config file ``erppeek.ini`` contains a `section` for each environment.
Each section provides parameters for the connection: ``host``, ``port``,
``database``, ``user`` and (optional) ``password``. Default values are
read from the ``[DEFAULT]`` section. If the ``password`` is not in the
configuration file, it is requested on login.
Return a tuple ``(server, db, user, password or None)``.
Without argument, it returns the list of configured environments.
"""
p = configparser.SafeConfigParser()
with open(Client._config_file) as f:
p.readfp(f)
if section is None:
return p.sections()
env = dict(p.items(section))
scheme = env.get('scheme', 'http')
if scheme == 'local':
server = shlex.split(env.get('options', ''))
else:
server = '%s://%s:%s' % (scheme, env['host'], env['port'])
return (server, env['database'], env['username'], env.get('password'))
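# Illustrative 'erppeek.ini' section (all values are examples only):
#   [demo]
#   scheme = http
#   host = localhost
#   port = 8069
#   database = demo_db
#   username = admin
#   ; password is optional; if omitted, it is asked on login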
def start_odoo_services(options=None, appname=None):
"""Initialize the Odoo services.
Import the ``openerp`` package and load the Odoo services.
The argument `options` receives the command line arguments
for ``openerp``. Example:
``['-c', '/path/to/openerp-server.conf', '--without-demo', 'all']``.
Return the ``openerp`` package.
"""
import openerp as odoo
odoo._api_v7 = odoo.release.version_info < (8,)
if not (odoo._api_v7 and odoo.osv.osv.service):
os.putenv('TZ', 'UTC')
if appname is not None:
os.putenv('PGAPPNAME', appname)
odoo.tools.config.parse_config(options or [])
if odoo.release.version_info < (7,):
odoo.netsvc.init_logger()
odoo.osv.osv.start_object_proxy()
odoo.service.web_services.start_web_services()
elif odoo._api_v7:
odoo.service.start_internal()
else: # Odoo v8
try:
odoo.api.Environment._local.environments = \
odoo.api.Environments()
except AttributeError:
pass
def close_all():
for db in odoo.modules.registry.RegistryManager.registries.keys():
odoo.sql_db.close_db(db)
atexit.register(close_all)
return odoo
def issearchdomain(arg):
"""Check if the argument is a search domain.
Examples:
- ``[('name', '=', 'mushroom'), ('state', '!=', 'draft')]``
- ``['name = mushroom', 'state != draft']``
- ``[]``
"""
return isinstance(arg, list) and not (arg and (
# Not a list of ids: [1, 2, 3]
isinstance(arg[0], int_types) or
# Not a list of ids as str: ['1', '2', '3']
(isinstance(arg[0], basestring) and arg[0].isdigit())))
def searchargs(params, kwargs=None, context=None):
"""Compute the 'search' parameters."""
if not params:
return ([],)
domain = params[0]
if not isinstance(domain, list):
return params
for (idx, term) in enumerate(domain):
if isinstance(term, basestring) and term not in DOMAIN_OPERATORS:
m = _term_re.match(term.strip())
if not m:
raise ValueError('Cannot parse term %r' % term)
(field, operator, value) = m.groups()
try:
value = literal_eval(value)
except Exception:
# Interpret the value as a string
pass
domain[idx] = (field, operator, value)
if (kwargs or context) and len(params) == 1:
params = (domain,
kwargs.pop('offset', 0),
kwargs.pop('limit', None),
kwargs.pop('order', None),
context)
else:
params = (domain,) + params[1:]
return params
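# Illustrative example (not part of the original source): string terms are
# converted to standard domain tuples, with literal_eval applied to values:
#   searchargs((['name like Odoo%', 'active = True'],))
#   ->  ([('name', 'like', 'Odoo%'), ('active', '=', True)],)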
class Error(Exception):
"""An ERPpeek error."""
class Service(object):
"""A wrapper around XML-RPC endpoints.
The connected endpoints are exposed on the Client instance.
The `server` argument is the URL of the server (scheme+host+port).
If `server` is an ``openerp`` module, it is used to connect to the
local server. The `endpoint` argument is the name of the service
(examples: ``"object"``, ``"db"``). The `methods` is the list of methods
which should be exposed on this endpoint. Use ``dir(...)`` on the
instance to list them.
"""
_rpcpath = ''
_methods = ()
def __init__(self, server, endpoint, methods,
transport=None, verbose=False):
if isinstance(server, basestring):
self._rpcpath = rpcpath = server + '/xmlrpc/'
proxy = ServerProxy(rpcpath + endpoint,
transport=transport, allow_none=True)
if hasattr(proxy._ServerProxy__transport, 'close'): # >= 2.7
self.close = proxy._ServerProxy__transport.close
rpc = proxy._ServerProxy__request
elif server._api_v7:
rpc = server.netsvc.ExportService.getService(endpoint).dispatch
else: # Odoo v8
rpc = functools.partial(server.http.dispatch_rpc, endpoint)
self._dispatch = rpc
self._endpoint = endpoint
self._methods = methods
self._verbose = verbose
def __repr__(self):
return "<Service '%s%s'>" % (self._rpcpath, self._endpoint)
__str__ = __repr__
def __dir__(self):
return sorted(self._methods)
def __getattr__(self, name):
if name not in self._methods:
raise AttributeError("'Service' object has no attribute %r" % name)
if self._verbose:
def sanitize(args):
if self._endpoint != 'db' and len(args) > 2:
args = list(args)
args[2] = '*'
return args
maxcol = MAXCOL[min(len(MAXCOL), self._verbose) - 1]
def wrapper(self, *args):
snt = ', '.join([repr(arg) for arg in sanitize(args)])
snt = '%s.%s(%s)' % (self._endpoint, name, snt)
if len(snt) > maxcol:
suffix = '... L=%s' % len(snt)
snt = snt[:maxcol - len(suffix)] + suffix
print('--> ' + snt)
res = self._dispatch(name, args)
rcv = str(res)
if len(rcv) > maxcol:
suffix = '... L=%s' % len(rcv)
rcv = rcv[:maxcol - len(suffix)] + suffix
print('<-- ' + rcv)
return res
else:
wrapper = lambda s, *args: s._dispatch(name, args)
return _memoize(self, name, wrapper)
def __del__(self):
if hasattr(self, 'close'):
self.close()
class Client(object):
"""Connection to an Odoo instance.
This is the top level object.
The `server` is the URL of the instance, like ``http://localhost:8069``.
If `server` is an ``openerp`` module, it is used to connect to the local
server (>= 6.1).
The `db` is the name of the database and the `user` should exist in the
table ``res.users``. If the `password` is not provided, it will be
asked on login.
"""
_config_file = os.path.join(os.curdir, CONF_FILE)
def __init__(self, server, db=None, user=None, password=None,
transport=None, verbose=False):
if isinstance(server, list):
appname = os.path.basename(__file__).rstrip('co')
server = start_odoo_services(server, appname=appname)
elif isinstance(server, basestring) and server[-1:] == '/':
server = server.rstrip('/')
self._server = server
float_version = 99.0
def get_proxy(name):
methods = list(_methods[name]) if (name in _methods) else []
if float_version < 8.0:
methods += _obsolete_methods.get(name) or ()
return Service(server, name, methods, transport, verbose=verbose)
self.server_version = ver = get_proxy('db').server_version()
        self.major_version = re.match(r'\d+\.?\d*', ver).group()
float_version = float(self.major_version)
# Create the XML-RPC proxies
self.db = get_proxy('db')
self.common = get_proxy('common')
self._object = get_proxy('object')
self._report = get_proxy('report')
self._wizard = get_proxy('wizard') if float_version < 7.0 else None
self.reset()
self.context = None
if db:
# Try to login
self.login(user, password=password, database=db)
@classmethod
def from_config(cls, environment, user=None, verbose=False):
"""Create a connection to a defined environment.
Read the settings from the section ``[environment]`` in the
``erppeek.ini`` file and return a connected :class:`Client`.
See :func:`read_config` for details of the configuration file format.
"""
(server, db, conf_user, password) = read_config(environment)
if user and user != conf_user:
password = None
client = cls(server, verbose=verbose)
client._environment = environment
client.login(user or conf_user, password=password, database=db)
return client
def reset(self):
self.user = self._environment = None
self._db, self._models = (), {}
self._execute = self._exec_workflow = None
def __repr__(self):
return "<Client '%s#%s'>" % (self._server or '', self._db)
def login(self, user, password=None, database=None):
"""Switch `user` and (optionally) `database`.
If the `password` is not available, it will be asked.
"""
if database:
try:
dbs = self.db.list()
except Fault:
pass # AccessDenied: simply ignore this check
else:
if database not in dbs:
raise Error("Database '%s' does not exist: %s" %
(database, dbs))
if not self._db:
self._db = database
# Used for logging, copied from openerp.sql_db.db_connect
current_thread().dbname = database
elif self._db:
database = self._db
else:
raise Error('Not connected')
(uid, password) = self._auth(database, user, password)
if not uid:
current_thread().dbname = self._db
raise Error('Invalid username or password')
if self._db != database:
self.reset()
self._db = database
self.user = user
# Authenticated endpoints
def authenticated(method):
return functools.partial(method, self._db, uid, password)
self._execute = authenticated(self._object.execute)
self._exec_workflow = authenticated(self._object.exec_workflow)
self.report = authenticated(self._report.report)
self.report_get = authenticated(self._report.report_get)
if self.major_version != '5.0':
# Only for Odoo and OpenERP >= 6
self.execute_kw = authenticated(self._object.execute_kw)
self.render_report = authenticated(self._report.render_report)
if self._wizard:
self._wizard_execute = authenticated(self._wizard.execute)
self._wizard_create = authenticated(self._wizard.create)
return uid
# Needed for interactive use
connect = None
_login = login
_login.cache = {}
def _check_valid(self, database, uid, password):
execute = self._object.execute
try:
execute(database, uid, password, 'res.users', 'fields_get_keys')
return True
except Fault:
return False
def _auth(self, database, user, password):
assert database
cache_key = (self._server, database, user)
if password:
# If password is explicit, call the 'login' method
uid = None
else:
# Read from cache
uid, password = self._login.cache.get(cache_key) or (None, None)
# Read from table 'res.users'
if ((not uid and self._db == database and
self.access('res.users', 'write'))):
obj = self.read('res.users',
[('login', '=', user)], 'id password')
if obj:
uid = obj[0]['id']
password = obj[0]['password']
else:
# Invalid user
uid = False
# Ask for password
if not password and uid is not False:
from getpass import getpass
password = getpass('Password for %r: ' % user)
if uid:
# Check if password changed
if not self._check_valid(database, uid, password):
if cache_key in self._login.cache:
del self._login.cache[cache_key]
uid = False
elif uid is None:
# Do a standard 'login'
uid = self.common.login(database, user, password)
if uid:
# Update the cache
self._login.cache[cache_key] = (uid, password)
return (uid, password)
@classmethod
def _set_interactive(cls, global_vars={}):
# Don't call multiple times
del Client._set_interactive
for name in ['__name__', '__doc__'] + __all__:
global_vars[name] = globals()[name]
def get_pool(db_name=None):
"""Return a model registry.
Use get_pool(db_name).db.cursor() to grab a cursor.
With Odoo v8, use get_pool(db_name).cursor() instead.
"""
client = global_vars['client']
registry = client._server.modules.registry
return registry.RegistryManager.get(db_name or client._db)
def connect(self, env=None):
"""Connect to another environment and replace the globals()."""
if env:
# Safety measure: turn down the previous connection
global_vars['client'].reset()
client = self.from_config(env, verbose=self.db._verbose)
return
client = self
env = client._environment or client._db
try: # copy the context to the new client
client.context = dict(global_vars['client'].context)
except (KeyError, TypeError):
pass # client not yet in globals(), or context is None
global_vars['client'] = client
if hasattr(client._server, 'modules'):
global_vars['get_pool'] = get_pool
# Tweak prompt
sys.ps1 = '%s >>> ' % (env,)
sys.ps2 = '%s ... ' % (env,)
# Logged in?
if client.user:
global_vars['model'] = client.model
global_vars['models'] = client.models
global_vars['do'] = client.execute
print('Logged in as %r' % (client.user,))
else:
global_vars.update({'model': None, 'models': None, 'do': None})
def login(self, user, password=None, database=None):
"""Switch `user` and (optionally) `database`."""
try:
self._login(user, password=password, database=database)
except Error as exc:
print('%s: %s' % (exc.__class__.__name__, exc))
else:
# Register the new globals()
self.connect()
# Set hooks to recreate the globals()
cls.login = login
cls.connect = connect
return global_vars
def create_database(self, passwd, database, demo=False, lang='en_US',
user_password='admin'):
"""Create a new database.
The superadmin `passwd` and the `database` name are mandatory.
By default, `demo` data are not loaded and `lang` is ``en_US``.
Wait for the thread to finish and login if successful.
"""
if self.major_version in ('5.0', '6.0'):
thread_id = self.db.create(passwd, database, demo, lang,
user_password)
progress = 0
try:
while progress < 1:
time.sleep(3)
progress, users = self.db.get_progress(passwd, thread_id)
except KeyboardInterrupt:
return {'id': thread_id, 'progress': progress}
else:
self.db.create_database(passwd, database, demo, lang,
user_password)
return self.login('admin', user_password, database=database)
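    # Illustrative usage (names are examples, not part of the original
    # source): client.create_database('super_passwd', 'demo_db', demo=True)
    # logs in as 'admin' on the new database once creation succeeds.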
def execute(self, obj, method, *params, **kwargs):
"""Wrapper around ``object.execute`` RPC method.
Argument `method` is the name of an ``osv.osv`` method or
a method available on this `obj`.
Method `params` are allowed. If needed, keyword
arguments are collected in `kwargs`.
"""
assert self.user, 'Not connected'
assert isinstance(obj, basestring)
assert isinstance(method, basestring) and method != 'browse'
context = kwargs.pop('context', None)
ordered = single_id = False
if method == 'read':
assert params
if issearchdomain(params[0]):
# Combine search+read
search_params = searchargs(params[:1], kwargs, context)
ordered = len(search_params) > 3 and search_params[3]
ids = self._execute(obj, 'search', *search_params)
elif isinstance(params[0], list):
ordered = kwargs.pop('order', False) and params[0]
ids = set(params[0])
ids.discard(False)
if not ids and ordered:
return [False] * len(ordered)
ids = sorted(ids)
else:
single_id = True
ids = [params[0]] if params[0] else False
if not ids:
return ids
if len(params) > 1:
params = (ids,) + params[1:]
else:
params = (ids, kwargs.pop('fields', None))
elif method == 'search':
# Accept keyword arguments for the search method
params = searchargs(params, kwargs, context)
context = None
elif method == 'search_count':
params = searchargs(params)
elif method == 'perm_read':
# broken with a single id (verified with 5.0 and 6.1)
if params and isinstance(params[0], int_types):
params = ([params[0]],) + params[1:]
if context:
params = params + (context,)
# Ignore extra keyword arguments
for item in kwargs.items():
print('Ignoring: %s = %r' % item)
res = self._execute(obj, method, *params)
if ordered:
# The results are not in the same order as the ids
# when received from the server
resdic = dict([(val['id'], val) for val in res])
if not isinstance(ordered, list):
ordered = ids
res = [resdic.get(id_, False) for id_ in ordered]
return res[0] if single_id else res
def exec_workflow(self, obj, signal, obj_id):
"""Wrapper around ``object.exec_workflow`` RPC method.
Argument `obj` is the name of the model. The `signal`
is sent to the object identified by its integer ``id`` `obj_id`.
"""
assert self.user, 'Not connected'
assert isinstance(obj, basestring) and isinstance(signal, basestring)
return self._exec_workflow(obj, signal, obj_id)
def wizard(self, name, datas=None, action='init', context=None):
"""Wrapper around ``wizard.create`` and ``wizard.execute``
RPC methods.
If only `name` is provided, a new wizard is created and its ``id`` is
returned. If `action` is not ``"init"``, then the action is executed.
In this case the `name` is either an ``id`` or a string.
If the `name` is a string, the wizard is created before the execution.
The optional `datas` argument provides data for the action.
The optional `context` argument is passed to the RPC method.
Removed in OpenERP 7.
"""
if isinstance(name, int_types):
wiz_id = name
else:
wiz_id = self._wizard_create(name)
if datas is None:
if action == 'init' and name != wiz_id:
return wiz_id
datas = {}
if context is None:
context = self.context
return self._wizard_execute(wiz_id, datas, action, context)
def _upgrade(self, modules, button):
# First, update the list of modules
ir_module = self.model('ir.module.module', False)
updated, added = ir_module.update_list()
if added:
print('%s module(s) added to the list' % added)
# Find modules
ids = modules and ir_module.search([('name', 'in', modules)])
if ids:
# Safety check
mods = ir_module.read([_pending_state], 'name state')
if mods:
raise Error('Pending actions:\n' + '\n'.join(
(' %(state)s\t%(name)s' % mod) for mod in mods))
if button == 'button_uninstall':
# Safety check
names = ir_module.read([('id', 'in', ids),
'state != installed'], 'name')
if names:
raise Error('Not installed: %s' % ', '.join(names))
# A trick to uninstall dependent add-ons
ir_module.write(ids, {'state': 'to remove'})
try:
# Click upgrade/install/uninstall button
self.execute('ir.module.module', button, ids)
except Exception:
if button == 'button_uninstall':
ir_module.write(ids, {'state': 'installed'})
raise
mods = ir_module.read([_pending_state], 'name state')
if not mods:
if ids:
print('Already up-to-date: %s' %
self.modules([('id', 'in', ids)]))
elif modules:
raise Error('Module(s) not found: %s' % ', '.join(modules))
print('%s module(s) updated' % updated)
return
print('%s module(s) selected' % len(ids))
print('%s module(s) to process:' % len(mods))
for mod in mods:
print(' %(state)s\t%(name)s' % mod)
# Empty the models' cache
self._models.clear()
# Apply scheduled upgrades
if self.major_version == '5.0':
# Wizard "Apply Scheduled Upgrades"
rv = self.wizard('module.upgrade', action='start')
if 'config' not in [state[0] for state in rv.get('state', ())]:
# Something bad happened
return rv
else:
self.execute('base.module.upgrade', 'upgrade_module', [])
def upgrade(self, *modules):
"""Press the button ``Upgrade``."""
return self._upgrade(modules, button='button_upgrade')
def install(self, *modules):
"""Press the button ``Install``."""
return self._upgrade(modules, button='button_install')
def uninstall(self, *modules):
"""Press the button ``Uninstall``."""
return self._upgrade(modules, button='button_uninstall')
def search(self, obj, *params, **kwargs):
"""Filter the records in the `domain`, return the ``ids``."""
return self.execute(obj, 'search', *params, **kwargs)
def count(self, obj, domain=None):
"""Count the records in the `domain`."""
return self.execute(obj, 'search_count', domain or [])
def read(self, obj, *params, **kwargs):
"""Wrapper for ``client.execute(obj, 'read', [...], ('a', 'b'))``.
The first argument `obj` is the model name (example: ``"res.partner"``)
The second argument, `domain`, accepts:
- ``[('name', '=', 'mushroom'), ('state', '!=', 'draft')]``
- ``['name = mushroom', 'state != draft']``
- ``[]``
- a list of ids ``[1, 2, 3]`` or a single id ``42``
The third argument, `fields`, accepts:
- a single field: ``'first_name'``
- a tuple of fields: ``('street', 'city')``
- a space separated string: ``'street city'``
- a format spec: ``'%(street)s %(city)s'``
If `fields` is omitted, all fields are read.
If `domain` is a single id, then:
- return a single value if a single field is requested.
- return a string if a format spec is passed in the `fields` argument.
- else, return a dictionary.
If `domain` is not a single id, the returned value is a list of items.
Each item complies with the rules of the previous paragraph.
The optional keyword arguments `offset`, `limit` and `order` are
used to restrict the search. The `order` is also used to order the
results returned. Note: the low-level RPC method ``read`` itself does
not preserve the order of the results.
"""
fmt = None
if len(params) > 1 and isinstance(params[1], basestring):
fmt = ('%(' in params[1]) and params[1]
if fmt:
fields = _fields_re.findall(fmt)
else:
# transform: "zip city" --> ("zip", "city")
fields = params[1].split()
if len(fields) == 1:
fmt = () # marker
params = (params[0], fields) + params[2:]
res = self.execute(obj, 'read', *params, **kwargs)
if not res:
return res
if fmt:
if isinstance(res, list):
return [(d and fmt % d) for d in res]
return fmt % res
if fmt == ():
if isinstance(res, list):
return [(d and d[fields[0]]) for d in res]
return res[fields[0]]
return res
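    # Illustrative calls, assuming a 'res.partner' model with 'name' and
    # 'city' fields (examples, not part of the original source):
    #   client.read('res.partner', [42, 43], 'name')          -> list of names
    #   client.read('res.partner', 42, ('name', 'city'))      -> dict
    #   client.read('res.partner', 42, '%(name)s (%(city)s)') -> str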
def _models_get(self, name):
try:
return self._models[name]
except KeyError:
self._models[name] = m = Model._new(self, name)
return m
def models(self, name=''):
"""Return a dictionary of models.
The argument `name` is a pattern to filter the models returned.
If omitted, all models are returned.
Keys are camel case names of the models.
Values are instances of :class:`Model`.
The return value can be used to declare the models in the global
namespace:
>>> globals().update(client.models('res.'))
"""
domain = [('model', 'like', name)]
models = self.execute('ir.model', 'read', domain, ('model',))
names = [m['model'] for m in models]
return dict([(mixedcase(mod), self._models_get(mod)) for mod in names])
def model(self, name, check=True):
"""Return a :class:`Model` instance.
The argument `name` is the name of the model. If the optional
argument `check` is :const:`False`, no validity check is done.
"""
try:
return self._models[name] if check else self._models_get(name)
except KeyError:
models = self.models(name)
if name in self._models:
return self._models[name]
if models:
errmsg = 'Model not found. These models exist:'
else:
errmsg = 'Model not found: %s' % (name,)
raise Error('\n * '.join([errmsg] + [str(m) for m in models.values()]))
def modules(self, name='', installed=None):
"""Return a dictionary of modules.
The optional argument `name` is a pattern to filter the modules.
If the boolean argument `installed` is :const:`True`, the modules
which are "Not Installed" or "Not Installable" are omitted. If
the argument is :const:`False`, only these modules are returned.
If argument `installed` is omitted, all modules are returned.
The return value is a dictionary where module names are grouped in
lists according to their ``state``.
"""
if isinstance(name, basestring):
domain = [('name', 'like', name)]
else:
domain = name
if installed is not None:
op = 'not in' if installed else 'in'
domain.append(('state', op, ['uninstalled', 'uninstallable']))
mods = self.read('ir.module.module', domain, 'name state')
if mods:
res = {}
for mod in mods:
if mod['state'] not in res:
res[mod['state']] = []
res[mod['state']].append(mod['name'])
return res
def keys(self, obj):
"""Wrapper for :meth:`Model.keys` method."""
return self.model(obj).keys()
def fields(self, obj, names=None):
"""Wrapper for :meth:`Model.fields` method."""
return self.model(obj).fields(names=names)
def field(self, obj, name):
"""Wrapper for :meth:`Model.field` method."""
return self.model(obj).field(name)
def access(self, obj, mode='read'):
"""Wrapper for :meth:`Model.access` method."""
try:
self._execute('ir.model.access', 'check', obj, mode)
return True
except (TypeError, Fault):
return False
def __getattr__(self, method):
if not method.islower():
return _memoize(self, method, self.model(lowercase(method)))
if method.startswith('_'):
errmsg = "'Client' object has no attribute %r" % method
raise AttributeError(errmsg)
# miscellaneous object methods
def wrapper(self, obj, *params, **kwargs):
"""Wrapper for client.execute(obj, %r, *params, **kwargs)."""
return self.execute(obj, method, *params, **kwargs)
return _memoize(self, method, wrapper, method)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.reset()
class Model(object):
"""The class for Odoo models."""
def __new__(cls, client, name):
return client.model(name)
@classmethod
def _new(cls, client, name):
m = object.__new__(cls)
(m.client, m._name) = (client, name)
m._execute = functools.partial(client.execute, name)
m.search = functools.partial(client.search, name)
m.count = functools.partial(client.count, name)
m.read = functools.partial(client.read, name)
return m
def __repr__(self):
return "<Model '%s'>" % (self._name,)
def _get_keys(self):
obj_keys = self._execute('fields_get_keys')
obj_keys.sort()
return obj_keys
def _get_fields(self):
return self._execute('fields_get')
def keys(self):
"""Return the keys of the model."""
return self._keys
def fields(self, names=None):
"""Return a dictionary of the fields of the model.
Optional argument `names` is a sequence of field names or
a space separated string of these names.
If omitted, all fields are returned.
"""
if names is None:
return self._fields
if isinstance(names, basestring):
names = names.split()
return dict([(k, v) for (k, v) in self._fields.items() if k in names])
def field(self, name):
"""Return the field properties for field `name`."""
return self._fields[name]
def access(self, mode="read"):
"""Check if the user has access to this model.
Optional argument `mode` is the access mode to check. Valid values
are ``read``, ``write``, ``create`` and ``unlink``. If omitted,
the ``read`` mode is checked. Return a boolean.
"""
return self.client.access(self._name, mode)
def browse(self, domain, *params, **kwargs):
"""Return a :class:`Record` or a :class:`RecordList`.
The argument `domain` accepts a single integer ``id``, a list of ids
or a search domain.
If it is a single integer, the return value is a :class:`Record`.
Otherwise, the return value is a :class:`RecordList`.
Be careful when passing a list of ids, because an empty list will be
considered an empty domain and will find all records in the database.
"""
context = kwargs.pop('context', self.client.context)
if isinstance(domain, int_types):
assert not params and not kwargs
return Record(self, domain, context=context)
if issearchdomain(domain):
params = searchargs((domain,) + params, kwargs, context)
domain = self._execute('search', *params)
# Ignore extra keyword arguments
for item in kwargs.items():
print('Ignoring: %s = %r' % item)
else:
assert not params and not kwargs
return RecordList(self, domain, context=context)
def get(self, domain, context=None):
"""Return a single :class:`Record`.
The argument `domain` accepts a single integer ``id`` or a
search domain, or an ``xml_id``. The return value is a
:class:`Record` or None. If multiple records are found,
a ``ValueError`` is raised.
"""
if context is None:
context = self.client.context
if isinstance(domain, int_types): # a single id
return Record(self, domain, context=context)
if isinstance(domain, basestring): # lookup the xml_id
(module, name) = domain.split('.')
data = self._imd_read(
[('module', '=', module), ('name', '=', name)], 'model res_id')
assert not data or data[0]['model'] == self._name
ids = [res['res_id'] for res in data]
else: # a search domain
assert issearchdomain(domain)
params = searchargs((domain,), {}, context)
ids = self._execute('search', *params)
if len(ids) > 1:
raise ValueError('domain matches too many records (%d)' % len(ids))
return Record(self, ids[0], context=context) if ids else None
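    # Illustrative lookup by External ID (model and xml_id are examples
    # only): client.model('res.country').get('base.us')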
def create(self, values, context=None):
"""Create a :class:`Record`.
The argument `values` is a dictionary of values which are used to
create the record. Relationship fields `one2many` and `many2many`
accept either a list of ids or a RecordList or the extended Odoo
syntax. Relationship fields `many2one` and `reference` accept
either a Record or the Odoo syntax.
The newly created :class:`Record` is returned.
"""
if context is None:
context = self.client.context
values = self._unbrowse_values(values)
new_id = self._execute('create', values, context=context)
return Record(self, new_id, context=context)
def _browse_values(self, values, context=None):
"""Wrap the values of a Record.
The argument `values` is a dictionary of values read from a Record.
When the field type is relational (many2one, one2many or many2many),
the value is wrapped in a Record or a RecordList.
Return a dictionary with the same keys as the `values` argument.
"""
for (key, value) in values.items():
if key == 'id' or hasattr(value, 'id'):
continue
field = self._fields[key]
field_type = field['type']
if field_type == 'many2one':
if value:
rel_model = self.client.model(field['relation'], False)
values[key] = Record(rel_model, value, context=context)
elif field_type in ('one2many', 'many2many'):
rel_model = self.client.model(field['relation'], False)
values[key] = RecordList(rel_model, value, context=context)
elif value and field_type == 'reference':
(res_model, res_id) = value.split(',')
rel_model = self.client.model(res_model, False)
values[key] = Record(rel_model, int(res_id), context=context)
return values
def _unbrowse_values(self, values):
"""Unwrap the id of Record and RecordList."""
new_values = values.copy()
for (key, value) in values.items():
field_type = self._fields[key]['type']
if hasattr(value, 'id'):
if field_type == 'reference':
new_values[key] = '%s,%s' % (value._model_name, value.id)
else:
new_values[key] = value = value.id
if field_type in ('one2many', 'many2many'):
if not value:
new_values[key] = [(6, 0, [])]
elif isinstance(value[0], int_types):
new_values[key] = [(6, 0, value)]
return new_values
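    # Illustrative example (not part of the original source): a plain list
    # of ids for a many2many field is wrapped in the Odoo "replace" triplet:
    #   {'category_id': [1, 2]}  ->  {'category_id': [(6, 0, [1, 2])]}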
def _get_external_ids(self, ids=None):
"""Retrieve the External IDs of the records.
Return a dictionary with keys being the fully qualified
External IDs, and values the ``Record`` entries.
"""
search_domain = [('model', '=', self._name)]
if ids is not None:
search_domain.append(('res_id', 'in', ids))
existing = self._imd_read(search_domain, ['module', 'name', 'res_id'])
res = {}
for rec in existing:
res['%(module)s.%(name)s' % rec] = self.get(rec['res_id'])
return res
def __getattr__(self, attr):
if attr in ('_keys', '_fields'):
return _memoize(self, attr, getattr(self, '_get' + attr)())
if attr.startswith('_imd_'):
imd = self.client.model('ir.model.data')
return _memoize(self, attr, getattr(imd, attr[5:]))
if attr.startswith('_'):
raise AttributeError("'Model' object has no attribute %r" % attr)
def wrapper(self, *params, **kwargs):
"""Wrapper for client.execute(%r, %r, *params, **kwargs)."""
if 'context' not in kwargs:
kwargs['context'] = self.client.context
return self._execute(attr, *params, **kwargs)
return _memoize(self, attr, wrapper, (self._name, attr))
class RecordList(object):
"""A sequence of Odoo :class:`Record`.
It has a similar API as the :class:`Record` class, but for a list of
records. The attributes of the ``RecordList`` are read-only, and they
return list of attribute values in the same order. The ``many2one``,
``one2many`` and ``many2many`` attributes are wrapped in ``RecordList``
and list of ``RecordList`` objects. Use the method ``RecordList.write``
to assign a single value to all the selected records.
"""
def __init__(self, res_model, ids, context=None):
idnames = list(ids)
for (index, id_) in enumerate(ids):
if isinstance(id_, (list, tuple)):
ids[index] = id_ = id_[0]
assert isinstance(id_, int_types), repr(id_)
if context is None:
context = res_model.client.context
# Bypass the __setattr__ method
self.__dict__.update({
'id': ids,
'_model_name': res_model._name,
'_model': res_model,
'_idnames': idnames,
'_context': context,
'_execute': res_model._execute,
})
def __repr__(self):
if len(self.id) > 16:
ids = 'length=%d' % len(self.id)
else:
ids = self.id
return "<RecordList '%s,%s'>" % (self._model_name, ids)
def __dir__(self):
return ['__getitem__', 'read', 'write', 'unlink', '_context',
'_idnames', '_model', '_model_name',
'_external_id'] + self._model._keys
def __len__(self):
return len(self.id)
def __add__(self, other):
assert self._model is other._model, 'Model mismatch'
ids = self._idnames + other._idnames
return RecordList(self._model, ids, self._context)
def read(self, fields=None, context=None):
"""Wrapper for :meth:`Record.read` method."""
if context is None:
context = self._context
client = self._model.client
if self.id:
values = client.read(self._model_name, self.id,
fields, order=True, context=context)
if is_list_of_dict(values):
browse_values = self._model._browse_values
return [v and browse_values(v, context) for v in values]
else:
values = []
if isinstance(fields, basestring):
field = self._model._fields.get(fields)
if field:
if field['type'] == 'many2one':
rel_model = client.model(field['relation'], False)
return RecordList(rel_model, values, context=context)
if field['type'] in ('one2many', 'many2many'):
rel_model = client.model(field['relation'], False)
return [RecordList(rel_model, v, context) for v in values]
if field['type'] == 'reference':
records = []
for value in values:
if value:
(res_model, res_id) = value.split(',')
rel_model = client.model(res_model, False)
value = Record(rel_model, int(res_id), context)
records.append(value)
return records
return values
def write(self, values, context=None):
"""Wrapper for :meth:`Record.write` method."""
if not self.id:
return True
if context is None:
context = self._context
values = self._model._unbrowse_values(values)
rv = self._execute('write', self.id, values, context=context)
return rv
def unlink(self, context=None):
"""Wrapper for :meth:`Record.unlink` method."""
if not self.id:
return True
if context is None:
context = self._context
rv = self._execute('unlink', self.id, context=context)
return rv
@property
def _external_id(self):
"""Retrieve the External IDs of the :class:`RecordList`.
Return the fully qualified External IDs with default value
False if there's none. If multiple IDs exist for a record,
only one of them is returned (randomly).
"""
xml_ids = dict([(r.id, xml_id) for (xml_id, r) in
self._model._get_external_ids(self.id).items()])
return [xml_ids.get(res_id, False) for res_id in self.id]
def __getitem__(self, key):
idname = self._idnames[key]
if idname is False:
return False
cls = RecordList if isinstance(key, slice) else Record
return cls(self._model, idname, context=self._context)
def __getattr__(self, attr):
context = self._context
if attr in self._model._keys:
return self.read(attr, context=context)
if attr.startswith('_'):
errmsg = "'RecordList' object has no attribute %r" % attr
raise AttributeError(errmsg)
def wrapper(self, *params, **kwargs):
"""Wrapper for client.execute(%r, %r, [...], *params, **kwargs)."""
if context is not None and 'context' not in kwargs:
kwargs['context'] = context
return self._execute(attr, self.id, *params, **kwargs)
return _memoize(self, attr, wrapper, (self._model_name, attr))
def __setattr__(self, attr, value):
if attr in self._model._keys or attr == 'id':
msg = "attribute %r is read-only; use 'RecordList.write' instead."
else:
msg = "has no attribute %r"
raise AttributeError("'RecordList' object %s" % msg % attr)
def __eq__(self, other):
return (isinstance(other, RecordList) and
self.id == other.id and self._model is other._model)
class Record(object):
"""A class for all Odoo records.
It maps any Odoo object.
The fields can be accessed through attributes. The changes are immediately
sent to the server.
The ``many2one``, ``one2many`` and ``many2many`` attributes are wrapped in
``Record`` and ``RecordList`` objects. These attributes support writing
too.
The attributes are evaluated lazily, and they are cached in the record.
The Record's cache is invalidated if any attribute is changed.
"""
def __init__(self, res_model, res_id, context=None):
if isinstance(res_id, (list, tuple)):
(res_id, res_name) = res_id
self.__dict__['_name'] = res_name
assert isinstance(res_id, int_types), repr(res_id)
if context is None:
context = res_model.client.context
# Bypass the __setattr__ method
self.__dict__.update({
'id': res_id,
'_model_name': res_model._name,
'_model': res_model,
'_context': context,
'_cached_keys': set(),
'_execute': res_model._execute,
})
def __repr__(self):
return "<Record '%s,%d'>" % (self._model_name, self.id)
def __str__(self):
return self._name
if PY2:
__unicode__ = __str__
def __str__(self):
return self._name.encode('ascii', 'backslashreplace')
def _get_name(self):
try:
(id_name,) = self._execute('name_get', [self.id])
name = '%s' % (id_name[1],)
except Exception:
name = '%s,%d' % (self._model_name, self.id)
return _memoize(self, '_name', name)
@property
def _keys(self):
return self._model._keys
@property
def _fields(self):
return self._model._fields
def refresh(self):
"""Force refreshing the record's data."""
self._cached_keys.discard('id')
for key in self._cached_keys:
delattr(self, key)
self._cached_keys.clear()
def _update(self, values):
new_values = self._model._browse_values(values, context=self._context)
self.__dict__.update(new_values)
self._cached_keys.update(new_values)
return new_values
def read(self, fields=None, context=None):
"""Read the `fields` of the :class:`Record`.
The argument `fields` accepts different kinds of values.
See :meth:`Client.read` for details.
"""
if context is None:
context = self._context
rv = self._model.read(self.id, fields, context=context)
if isinstance(rv, dict):
return self._update(rv)
elif isinstance(fields, basestring) and '%(' not in fields:
return self._update({fields: rv})[fields]
return rv
def perm_read(self, context=None):
"""Read the metadata of the :class:`Record`.
Return a dictionary of values.
See :meth:`Client.perm_read` for details.
"""
if context is None:
context = self._context
rv = self._execute('perm_read', [self.id], context=context)
return rv[0] if rv else None
def write(self, values, context=None):
"""Write the `values` in the :class:`Record`.
`values` is a dictionary of values.
See :meth:`Model.create` for details.
"""
if context is None:
context = self._context
values = self._model._unbrowse_values(values)
rv = self._execute('write', [self.id], values, context=context)
self.refresh()
return rv
def unlink(self, context=None):
"""Delete the current :class:`Record` from the database."""
if context is None:
context = self._context
rv = self._execute('unlink', [self.id], context=context)
self.refresh()
return rv
def copy(self, default=None, context=None):
"""Copy a record and return the new :class:`Record`.
The optional argument `default` is a mapping which overrides some
values of the new record.
"""
if context is None:
context = self._context
if default:
default = self._model._unbrowse_values(default)
new_id = self._execute('copy', self.id, default, context=context)
return Record(self._model, new_id, context=context)
def _send(self, signal):
"""Trigger workflow `signal` for this :class:`Record`."""
exec_workflow = self._model.client.exec_workflow
rv = exec_workflow(self._model_name, signal, self.id)
self.refresh()
return rv
@property
def _external_id(self):
"""Retrieve the External ID of the :class:`Record`.
Return the fully qualified External ID of the :class:`Record`,
with default value False if there's none. If multiple IDs
exist, only one of them is returned (randomly).
"""
xml_ids = self._model._get_external_ids([self.id])
return list(xml_ids)[0] if xml_ids else False
def _set_external_id(self, xml_id):
"""Set the External ID of this record."""
(mod, name) = xml_id.split('.')
obj = self._model_name
domain = ['|', '&', ('model', '=', obj), ('res_id', '=', self.id),
'&', ('module', '=', mod), ('name', '=', name)]
if self._model._imd_search(domain):
raise ValueError('ID %r collides with another entry' % xml_id)
vals = {'model': obj, 'res_id': self.id, 'module': mod, 'name': name}
self._model._imd_create(vals)
def __dir__(self):
return ['read', 'write', 'copy', 'unlink', '_send', 'refresh',
'_context', '_model', '_model_name', '_name', '_external_id',
'_keys', '_fields'] + self._model._keys
def __getattr__(self, attr):
context = self._context
if attr in self._model._keys:
return self.read(attr, context=context)
if attr == '_name':
return self._get_name()
if attr.startswith('_'):
raise AttributeError("'Record' object has no attribute %r" % attr)
def wrapper(self, *params, **kwargs):
"""Wrapper for client.execute(%r, %r, %d, *params, **kwargs)."""
if context is not None and 'context' not in kwargs:
kwargs['context'] = context
res = self._execute(attr, [self.id], *params, **kwargs)
self.refresh()
if isinstance(res, list) and len(res) == 1:
return res[0]
return res
return _memoize(self, attr, wrapper, (self._model_name, attr, self.id))
def __setattr__(self, attr, value):
if attr == '_external_id':
return self._set_external_id(value)
if attr not in self._model._keys:
raise AttributeError("'Record' object has no attribute %r" % attr)
if attr == 'id':
raise AttributeError("'Record' object attribute 'id' is read-only")
self.write({attr: value})
def __eq__(self, other):
return (isinstance(other, Record) and
self.id == other.id and self._model is other._model)
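# Illustrative Record usage (model and field names are examples only):
#   rec = client.model('res.users').get(1)
#   rec.login                # lazy read, cached on the Record
#   rec.name = 'New name'    # triggers an immediate 'write' on the server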
def _interact(global_vars, use_pprint=True, usage=USAGE):
import code
import pprint
if PY2:
import __builtin__ as builtins
def _exec(code, g):
exec('exec code in g')
else:
import builtins
_exec = getattr(builtins, 'exec')
if use_pprint:
def displayhook(value, _printer=pprint.pprint, _builtins=builtins):
# Pretty-format the output
if value is None:
return
_printer(value)
_builtins._ = value
sys.displayhook = displayhook
class Usage(object):
def __call__(self):
print(usage)
__repr__ = lambda s: usage
builtins.usage = Usage()
try:
import readline as rl
import rlcompleter
rl.parse_and_bind('tab: complete')
# IOError if file missing, or broken Apple readline
rl.read_history_file(HIST_FILE)
except Exception:
pass
else:
if rl.get_history_length() < 0:
rl.set_history_length(int(os.getenv('HISTSIZE', 500)))
# better append instead of replace?
atexit.register(rl.write_history_file, HIST_FILE)
class Console(code.InteractiveConsole):
def runcode(self, code):
try:
_exec(code, global_vars)
except SystemExit:
raise
except:
# Print readable 'Fault' errors
# Work around http://bugs.python.org/issue12643
(exc_type, exc, tb) = sys.exc_info()
msg = ''.join(format_exception(exc_type, exc, tb, chain=False))
print(msg.strip())
sys.exc_clear() if hasattr(sys, 'exc_clear') else None # Python 2.x
# Key UP to avoid an empty line
Console().interact('\033[A')
def main(interact=_interact):
description = ('Inspect data on Odoo objects. Use interactively '
'or query a model (-m) and pass search terms or '
'ids as positional parameters after the options.')
parser = optparse.OptionParser(
usage='%prog [options] [search_term_or_id [search_term_or_id ...]]',
version=__version__,
description=description)
parser.add_option(
'-l', '--list', action='store_true', dest='list_env',
help='list sections of the configuration')
parser.add_option(
'--env',
help='read connection settings from the given section')
parser.add_option(
'-c', '--config', default=None,
help='specify alternate config file (default: %r)' % CONF_FILE)
parser.add_option(
'--server', default=None,
help='full URL to the XML-RPC server (default: %s)' % DEFAULT_URL)
parser.add_option('-d', '--db', default=DEFAULT_DB, help='database')
parser.add_option('-u', '--user', default=None, help='username')
parser.add_option(
'-p', '--password', default=None,
help='password, or it will be requested on login')
parser.add_option(
'-m', '--model',
help='the type of object to find')
parser.add_option(
'-f', '--fields', action='append',
help='restrict the output to certain fields (multiple allowed)')
parser.add_option(
'-i', '--interact', action='store_true',
help='use interactively; default when no model is queried')
parser.add_option(
'-v', '--verbose', default=0, action='count',
help='verbose')
(args, domain) = parser.parse_args()
Client._config_file = os.path.join(os.curdir, args.config or CONF_FILE)
if args.list_env:
print('Available settings: ' + ' '.join(read_config()))
return
if (args.interact or not args.model):
global_vars = Client._set_interactive()
print(USAGE)
if args.env:
client = Client.from_config(args.env,
user=args.user, verbose=args.verbose)
else:
if not args.server:
args.server = ['-c', args.config] if args.config else DEFAULT_URL
if not args.user:
args.user = DEFAULT_USER
client = Client(args.server, args.db, args.user, args.password,
verbose=args.verbose)
client.context = {'lang': (os.getenv('LANG') or 'en_US').split('.')[0]}
if args.model and domain and client.user:
data = client.execute(args.model, 'read', domain, args.fields)
if not args.fields:
args.fields = ['id']
if data:
args.fields.extend([fld for fld in data[0] if fld != 'id'])
writer = _DictWriter(sys.stdout, args.fields, "", "ignore",
quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
writer.writerows(data)
if client.connect is not None:
if not client.user:
client.connect()
# Enter interactive mode
return interact(global_vars) if interact else global_vars
if __name__ == '__main__':
main()
| {
"content_hash": "52b6e23b313e61c68d775144f0515019",
"timestamp": "",
"source": "github",
"line_count": 1681,
"max_line_length": 79,
"avg_line_length": 38.55502676977989,
"alnum_prop": 0.5540263226921356,
"repo_name": "Endika/erppeek",
"id": "05635a36f19fa157b9626f6a693eea052449fd33",
"size": "64857",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "erppeek.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "153367"
}
],
"symlink_target": ""
} |
from signbank.pages.models import *
from django.template import loader, RequestContext
from django.shortcuts import get_object_or_404, render_to_response
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound
from django.conf import settings
from django.utils.safestring import mark_safe
from django.views.decorators.csrf import csrf_protect
from signbank.dictionary.models import Language, Dataset
DEFAULT_TEMPLATE = 'pages/default.html'
@csrf_protect
def page(request, url='/'):
"""
Flat page view.
Models: `pages.page`
Templates: Uses the template defined by the ``template_name`` field,
or `pages/default.html` if template_name is not defined.
Context:
page
`pages.page` object
"""
if not url.endswith('/') and settings.APPEND_SLASH:
return HttpResponseRedirect("%s/" % request.path)
if not url.startswith('/'):
url = "/" + url
# here I've removed the requirement that the page be for this site
# - this won't work if we ever have more than one site here
# which isn't planned
# deal with the lack of a root page
try:
f = Page.objects.get(url__exact=url)
    except Page.DoesNotExist:
# no page, if we're after the root page then serve a default page
if url == '/':
            f = Page(title='No Pages',
                     content='<p>No pages defined. Log in to the '
                             '<a href="/admin">admin site</a> to create some.</p>')
else:
t = loader.get_template("404.html")
return HttpResponseNotFound(t.render(request=request))
# If registration is required for accessing this page, and the user isn't
# logged in, redirect to the login page.
# if len(f.group_required.all()) > 0:
#
# if not request.user.is_authenticated() :
# from django.contrib.auth.views import redirect_to_login
# return redirect_to_login(request.path)
if f.template_name:
t = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
else:
t = loader.get_template(DEFAULT_TEMPLATE)
# To avoid having to always use the "|safe" filter in flatpage templates,
# mark the title and content as already safe (since they are raw HTML
# content in the first place).
if request.LANGUAGE_CODE == 'nl':
f.title = mark_safe(f.title_dutch)
f.content = mark_safe(f.content_dutch)
elif request.LANGUAGE_CODE == 'zh-hans':
f.title = mark_safe(f.title_chinese)
f.content = mark_safe(f.content_chinese)
else:
f.title = mark_safe(f.title)
f.content = mark_safe(f.content)
from signbank.tools import get_selected_datasets_for_user
selected_datasets = get_selected_datasets_for_user(request.user)
dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
response = HttpResponse(t.render({'page': f,
'dataset_languages': dataset_languages,
'selected_datasets': selected_datasets,
'SHOW_DATASET_INTERFACE_OPTIONS': settings.SHOW_DATASET_INTERFACE_OPTIONS
},request))
return response | {
"content_hash": "c0ccd8ca22529fcdbd4a93d9205d2fa8",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 111,
"avg_line_length": 39.024096385542165,
"alnum_prop": 0.6356900277863539,
"repo_name": "Signbank/NGT-signbank",
"id": "020baea60196c653a5410820442468b1d0a1e2f6",
"size": "3239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "signbank/pages/views.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "441343"
},
{
"name": "HTML",
"bytes": "401393"
},
{
"name": "JavaScript",
"bytes": "737137"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "818708"
}
],
"symlink_target": ""
} |
from lean_workbench.core import db
company_tags_association = db.Table('company_tags_association',
db.Column('company_id', db.Integer, db.ForeignKey('crunchbase_company.id')),
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id'))
)
class CrunchbaseCompanyModel(db.Model):
__tablename__ = "crunchbase_company"
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String)
tags = db.relationship("Tag", secondary = company_tags_association)
number_of_employees = db.Column(db.Integer)
    founded_year = db.Column(db.Integer)  # four-digit year
founded_month = db.Column(db.Integer)
founded_day = db.Column(db.Integer)
image= db.Column(db.String)
crunchbase_url = db.Column(db.String)
homepage_url = db.Column(db.String)
# startup or finanical organization
company_type = db.Column(db.String)
# associated user
username = db.Column(db.String)
state_code = db.Column(db.String)
class Tag(db.Model):
"""
Company semantic tags
"""
__tablename__ = "tag"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
def __init__(self, name=None):
        self.name = name | {
"content_hash": "f760f0d97ea19f53e59226461eb41672",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 80,
"avg_line_length": 30.77777777777778,
"alnum_prop": 0.7129963898916968,
"repo_name": "wigginslab/lean-workbench",
"id": "bb7eb24061ccb79f30da57a7d36d51b3d50d1740",
"size": "1108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lean_workbench/crunchbase/crunchbase_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "203"
},
{
"name": "CSS",
"bytes": "8523037"
},
{
"name": "HTML",
"bytes": "1204783"
},
{
"name": "JavaScript",
"bytes": "1385939"
},
{
"name": "Makefile",
"bytes": "1307"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "353032"
},
{
"name": "Shell",
"bytes": "2058"
}
],
"symlink_target": ""
} |
"""Services for questions data model."""
import logging
from core.domain import question_domain
from core.platform import models
import feconf
(question_models,) = models.Registry.import_models([models.NAMES.question])
# This command takes an additional 'title' parameter.
CMD_CREATE_NEW = 'create_new'
def _create_question(committer_id, question, commit_message):
"""Creates a new question.
Args:
committer_id: str. ID of the committer.
        question: Question. The question domain object.
commit_message: str. A description of changes made to the question.
"""
model = question_models.QuestionModel.create(
title=question.title,
question_data=question.question_data,
question_data_schema_version=question.question_data_schema_version,
collection_id=question.collection_id,
language_code=question.language_code,
)
model.commit(committer_id, commit_message, [{
'cmd': CMD_CREATE_NEW,
'title': question.title
}])
return model
def add_question(committer_id, question):
"""Saves a new question.
Args:
committer_id: str. ID of the committer.
question: Question. Question to be saved.
"""
commit_message = (
'New question created with title \'%s\'.' % question.title)
question_model = _create_question(committer_id, question, commit_message)
return question_model
def delete_question(committer_id, question_id, force_deletion=False):
"""Deletes the question with the given question_id.
Args:
committer_id: str. ID of the committer.
question_id: str. ID of the question.
force_deletion: bool. If true, the question and its history are fully
deleted and are unrecoverable. Otherwise, the question and all
its history are marked as deleted, but the corresponding models are
still retained in the datastore. This last option is the preferred
one.
"""
question_model = question_models.QuestionModel.get(question_id)
question_model.delete(
committer_id, feconf.COMMIT_MESSAGE_QUESTION_DELETED,
force_deletion=force_deletion)
def get_question_from_model(question_model):
"""Returns domain object repersenting the given question model.
Args:
question_model: QuestionModel. The question model loaded from the
datastore.
Returns:
Question. The domain object representing the question model.
"""
return question_domain.Question(
question_model.id, question_model.title, question_model.question_data,
question_model.question_data_schema_version,
question_model.collection_id, question_model.language_code)
def get_question_by_id(question_id):
"""Returns a domain object representing a question.
Args:
question_id: str. ID of the question.
Returns:
Question or None. The domain object representing a question with the
given id, or None if it does not exist.
"""
    question_model = question_models.QuestionModel.get(
        question_id, strict=False)
if question_model:
question = get_question_from_model(question_model)
return question
else:
return None
def apply_change_list(question_id, change_list):
"""Applies a changelist to a pristine question and returns the result.
Args:
question_id: str. ID of the given question.
change_list: list(QuestionChange). A change list to be applied to the
given question. Each entry in change_list is a QuestionChange
object.
Returns:
Question. The resulting question domain object.
"""
question = get_question_by_id(question_id)
try:
for change in change_list:
if change.cmd == question_domain.CMD_UPDATE_QUESTION_PROPERTY:
if (change.property_name ==
question_domain.QUESTION_PROPERTY_TITLE):
question.update_title(change.new_value)
elif (change.property_name ==
question_domain.QUESTION_PROPERTY_LANGUAGE_CODE):
question.update_language_code(change.new_value)
                elif (change.property_name ==
                        question_domain.QUESTION_PROPERTY_QUESTION_DATA):
question.update_question_data(change.new_value)
return question
except Exception as e:
logging.error(
'%s %s %s %s' % (
e.__class__.__name__, e, question_id, change_list)
)
raise
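# Illustrative sketch (not part of the original module; the exact
# QuestionChange constructor may differ in question_domain):
#
#   change_list = [question_domain.QuestionChange({
#       'cmd': question_domain.CMD_UPDATE_QUESTION_PROPERTY,
#       'property_name': question_domain.QUESTION_PROPERTY_TITLE,
#       'new_value': 'A sharper title'
#   })]
#   updated_question = apply_change_list(question_id, change_list)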
def _save_question(committer_id, question, change_list, commit_message):
"""Validates a question and commits it to persistent storage.
Args:
committer_id: str. The id of the user who is performing the update
action.
question: Question. The domain object representing a question.
change_list: list(QuestionChange). A list of QuestionChange objects.
These changes are applied in sequence to produce the resulting
question.
commit_message: str or None. A description of changes made to the
question.
Raises:
Exception: Received an invalid change list.
"""
if not change_list:
raise Exception(
'Unexpected error: received an invalid change list when trying to '
            'save question %s: %s' % (question.question_id, change_list))
question.validate()
question_model = question_models.QuestionModel.get(question.question_id)
question_model.title = question.title
question_model.question_data = question.question_data
question_model.question_data_schema_version = (
question.question_data_schema_version)
question_model.collection_id = question.collection_id
question_model.language_code = question.language_code
change_list_dict = [change.to_dict() for change in change_list]
question_model.commit(committer_id, commit_message, change_list_dict)
def update_question(committer_id, question_id, change_list, commit_message):
"""Updates a question. Commits changes.
Args:
committer_id: str. The id of the user who is performing the update
action.
question_id: str. The question ID.
change_list: list(QuestionChange). A list of QuestionChange objects.
These changes are applied in sequence to produce the resulting
question.
commit_message: str or None. A description of changes made to the
question.
"""
question = apply_change_list(question_id, change_list)
_save_question(committer_id, question, change_list, commit_message)
| {
"content_hash": "268e2c09cc5d9a009fae30cca6327477",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 79,
"avg_line_length": 35.6096256684492,
"alnum_prop": 0.6601591830605196,
"repo_name": "terrameijar/oppia",
"id": "27f0defb20e949667fffbb4e4e51e2e5c5a868ae",
"size": "7282",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "core/domain/question_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "95342"
},
{
"name": "HTML",
"bytes": "850374"
},
{
"name": "JavaScript",
"bytes": "2597367"
},
{
"name": "Python",
"bytes": "3177521"
},
{
"name": "Shell",
"bytes": "46904"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('topics', '0007_auto_20160829_0859'),
]
operations = [
migrations.AlterField(
model_name='favorite',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.DeleteModel(
name='User',
),
]
| {
"content_hash": "5df71e2ae7f725574689197a158e3881",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 110,
"avg_line_length": 24.82608695652174,
"alnum_prop": 0.6252189141856392,
"repo_name": "emelkrml/dictionary2",
"id": "e96ebee7ef98b9ced930ae59fd2907137fac1525",
"size": "643",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dictionary2/topics/migrations/0008_auto_20160829_0900.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2084"
},
{
"name": "C",
"bytes": "8307458"
},
{
"name": "C++",
"bytes": "169028"
},
{
"name": "CSS",
"bytes": "164525"
},
{
"name": "HTML",
"bytes": "673752"
},
{
"name": "JavaScript",
"bytes": "320596"
},
{
"name": "Lua",
"bytes": "23774"
},
{
"name": "M4",
"bytes": "112836"
},
{
"name": "Makefile",
"bytes": "88956"
},
{
"name": "Perl",
"bytes": "350622"
},
{
"name": "Python",
"bytes": "63562"
},
{
"name": "Roff",
"bytes": "14472"
},
{
"name": "Ruby",
"bytes": "142272"
},
{
"name": "Shell",
"bytes": "74904"
},
{
"name": "Smarty",
"bytes": "2094"
},
{
"name": "Tcl",
"bytes": "943594"
},
{
"name": "XSLT",
"bytes": "606"
}
],
"symlink_target": ""
} |
"""A module providing information about the necessity of brackets"""
from __future__ import print_function, division
from sympy.core.function import _coeff_isneg
# Default precedence values for some basic types
PRECEDENCE = {
"Lambda": 1,
"Xor": 10,
"Or": 20,
"And": 30,
"Relational": 35,
"Add": 40,
"Mul": 50,
"Pow": 60,
"Func": 70,
"Not": 100,
"Atom": 1000
}
# A dictionary assigning precedence values to certain classes. These values are
# treated like they were inherited, so not every single class has to be named
# here.
PRECEDENCE_VALUES = {
"Equivalent": PRECEDENCE["Xor"],
"Xor": PRECEDENCE["Xor"],
"Implies": PRECEDENCE["Xor"],
"Or": PRECEDENCE["Or"],
"And": PRECEDENCE["And"],
"Add": PRECEDENCE["Add"],
"Pow": PRECEDENCE["Pow"],
"Relational": PRECEDENCE["Relational"],
"Sub": PRECEDENCE["Add"],
"Not": PRECEDENCE["Not"],
"Function" : PRECEDENCE["Func"],
"NegativeInfinity": PRECEDENCE["Add"],
"MatAdd": PRECEDENCE["Add"],
"MatMul": PRECEDENCE["Mul"],
"MatPow": PRECEDENCE["Pow"],
"HadamardProduct": PRECEDENCE["Mul"]
}
# Sometimes it's not enough to assign a fixed precedence value to a
# class. Then a function can be inserted in this dictionary that takes
# an instance of this class as argument and returns the appropriate
# precedence value.
# Precedence functions
def precedence_Mul(item):
if _coeff_isneg(item):
return PRECEDENCE["Add"]
return PRECEDENCE["Mul"]
def precedence_Rational(item):
if item.p < 0:
return PRECEDENCE["Add"]
return PRECEDENCE["Mul"]
def precedence_Integer(item):
if item.p < 0:
return PRECEDENCE["Add"]
return PRECEDENCE["Atom"]
def precedence_Float(item):
if item < 0:
return PRECEDENCE["Add"]
return PRECEDENCE["Atom"]
def precedence_PolyElement(item):
if item.is_generator:
return PRECEDENCE["Atom"]
elif item.is_ground:
return precedence(item.coeff(1))
elif item.is_term:
return PRECEDENCE["Mul"]
else:
return PRECEDENCE["Add"]
def precedence_FracElement(item):
if item.denom == 1:
return precedence_PolyElement(item.numer)
else:
return PRECEDENCE["Mul"]
PRECEDENCE_FUNCTIONS = {
"Integer": precedence_Integer,
"Mul": precedence_Mul,
"Rational": precedence_Rational,
"Float": precedence_Float,
"PolyElement": precedence_PolyElement,
"FracElement": precedence_FracElement,
}
def precedence(item):
"""
Returns the precedence of a given object.
"""
if hasattr(item, "precedence"):
return item.precedence
try:
mro = item.__class__.__mro__
except AttributeError:
return PRECEDENCE["Atom"]
for i in mro:
n = i.__name__
if n in PRECEDENCE_FUNCTIONS:
return PRECEDENCE_FUNCTIONS[n](item)
elif n in PRECEDENCE_VALUES:
return PRECEDENCE_VALUES[n]
return PRECEDENCE["Atom"]
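# Illustrative doctest-style sketch (not part of the original module; assumes
# a working SymPy installation):
#
#   >>> from sympy import Integer, Symbol
#   >>> precedence(Integer(-2)) == PRECEDENCE["Add"]  # negative literals bind like sums
#   True
#   >>> precedence(Symbol("x")) == PRECEDENCE["Atom"]
#   True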
| {
"content_hash": "1c200af039b01cfcce8f9cb56177b160",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 79,
"avg_line_length": 25.025,
"alnum_prop": 0.6366966366966367,
"repo_name": "lindsayad/sympy",
"id": "83e503dc21026cbc61eee408c4cacf2740b3c005",
"size": "3003",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "sympy/printing/precedence.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14115627"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4734"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
"""Simple helper script to run pytype on GPU Python code."""
import os
import sys
# We can't depend on gpu_path_util, otherwise pytype's dependency graph ends up
# finding a cycle.
GPU_DIR = os.path.abspath(os.path.dirname(__file__))
CHROMIUM_SRC_DIR = os.path.realpath(os.path.join(GPU_DIR, '..', '..', '..'))
sys.path.append(os.path.join(CHROMIUM_SRC_DIR, 'testing'))
from pytype_common import pytype_runner # pylint: disable=wrong-import-position
# This list should be kept in sync with EXTRA_PATH_COMPONENTS in PRESUBMIT.py
EXTRA_PATHS_COMPONENTS = [
('build', ),
('build', 'fuchsia'),
('build', 'util'),
('testing', ),
('third_party', 'catapult', 'common', 'py_utils'),
('third_party', 'catapult', 'devil'),
('third_party', 'catapult', 'telemetry'),
('third_party', 'catapult', 'third_party', 'typ'),
('tools', 'perf'),
]
EXTRA_PATHS = [
os.path.join(CHROMIUM_SRC_DIR, *p) for p in EXTRA_PATHS_COMPONENTS
]
EXTRA_PATHS.append(GPU_DIR)
FILES_AND_DIRECTORIES_TO_CHECK = [
'flake_suppressor',
'gold_inexact_matching',
'gpu_tests',
'unexpected_passes',
]
FILES_AND_DIRECTORIES_TO_CHECK = [
os.path.join(GPU_DIR, f) for f in FILES_AND_DIRECTORIES_TO_CHECK
]
TEST_NAME = 'gpu_pytype'
TEST_LOCATION = '//content/test/gpu/run_pytype.py'
def main() -> int:
return pytype_runner.run_pytype(TEST_NAME, TEST_LOCATION,
FILES_AND_DIRECTORIES_TO_CHECK, EXTRA_PATHS,
GPU_DIR)
if __name__ == '__main__':
sys.exit(main())
| {
"content_hash": "dbc57de07f202344d58ebe21ad8d7b59",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 80,
"avg_line_length": 29.30188679245283,
"alnum_prop": 0.6297488731487444,
"repo_name": "nwjs/chromium.src",
"id": "86266026adc7ec73849f21dbb9db693b66b4a1c5",
"size": "1717",
"binary": false,
"copies": "6",
"ref": "refs/heads/nw70",
"path": "content/test/gpu/run_pytype.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'OrderedProduct', fields ['list', 'product']
db.create_unique(u'promotions_orderedproduct', ['list_id', 'product_id'])
def backwards(self, orm):
# Removing unique constraint on 'OrderedProduct', fields ['list', 'product']
db.delete_unique(u'promotions_orderedproduct', ['list_id', 'product_id'])
models = {
u'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': u"orm['catalogue.AttributeEntityType']"})
},
u'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
},
u'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': u"orm['catalogue.AttributeOptionGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
u'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.ProductAttribute']", 'through': u"orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Category']", 'through': u"orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': u"orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'products'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['catalogue.ProductClass']"}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Product']", 'symmetrical': 'False', 'through': u"orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': u"orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'upc': ('oscar.models.fields.NullCharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': u"orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
u'catalogue.productattributevalue': {
'Meta': {'unique_together': "(('attribute', 'product'),)", 'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.ProductAttribute']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': u"orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'catalogue.productcategory': {
'Meta': {'ordering': "['product', 'category']", 'unique_together': "(('product', 'category'),)", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Category']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
},
u'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'catalogue.productrecommendation': {
'Meta': {'unique_together': "(('primary', 'recommendation'),)", 'object_name': 'ProductRecommendation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': u"orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'promotions.automaticproductlist': {
'Meta': {'object_name': 'AutomaticProductList'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'link_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'blank': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_products': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '4'})
},
u'promotions.handpickedproductlist': {
'Meta': {'object_name': 'HandPickedProductList'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'link_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['catalogue.Product']", 'null': 'True', 'through': u"orm['promotions.OrderedProduct']", 'blank': 'True'})
},
u'promotions.image': {
'Meta': {'object_name': 'Image'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255'}),
'link_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'promotions.keywordpromotion': {
'Meta': {'object_name': 'KeywordPromotion'},
'clicks': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'filter': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'promotions.multiimage': {
'Meta': {'object_name': 'MultiImage'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['promotions.Image']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'promotions.orderedproduct': {
'Meta': {'ordering': "('display_order',)", 'unique_together': "(('list', 'product'),)", 'object_name': 'OrderedProduct'},
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['promotions.HandPickedProductList']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
},
u'promotions.orderedproductlist': {
'Meta': {'ordering': "('display_order',)", 'object_name': 'OrderedProductList', '_ormbases': [u'promotions.HandPickedProductList']},
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'handpickedproductlist_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['promotions.HandPickedProductList']", 'unique': 'True', 'primary_key': 'True'}),
'tabbed_block': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tabs'", 'to': u"orm['promotions.TabbedBlock']"})
},
u'promotions.pagepromotion': {
'Meta': {'object_name': 'PagePromotion'},
'clicks': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'page_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '128', 'db_index': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'promotions.rawhtml': {
'Meta': {'object_name': 'RawHTML'},
'body': ('django.db.models.fields.TextField', [], {}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'display_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'promotions.singleproduct': {
'Meta': {'object_name': 'SingleProduct'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
},
u'promotions.tabbedblock': {
'Meta': {'object_name': 'TabbedBlock'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['promotions'] | {
"content_hash": "66bff66aec0fa4714c623bd67f0b172e",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 233,
"avg_line_length": 83.26382978723404,
"alnum_prop": 0.5651862830275464,
"repo_name": "marcoantoniooliveira/labweb",
"id": "d1a6baf867fa2091049119f95f060135fcf36c72",
"size": "19591",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "oscar/apps/promotions/south_migrations/0006_auto__add_unique_orderedproduct_list_product.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "1534157"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "JavaScript",
"bytes": "2968822"
},
{
"name": "LiveScript",
"bytes": "6103"
},
{
"name": "Puppet",
"bytes": "3507"
},
{
"name": "Python",
"bytes": "30402832"
},
{
"name": "Shell",
"bytes": "10782"
},
{
"name": "TeX",
"bytes": "56626"
},
{
"name": "XSLT",
"bytes": "49764"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
long_description = "\n".join(
(
open("README.rst", encoding="utf-8").read(),
open("CHANGES.txt", encoding="utf-8").read(),
)
)
setup(
name="more.jinja2",
version="0.3.dev0",
description="Jinja2 template integration for Morepath",
long_description=long_description,
author="Martijn Faassen",
author_email="[email protected]",
keywords="morepath jinja2",
license="BSD",
url="https://github.com/morepath/more.jinja2",
namespace_packages=["more"],
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=[
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
python_requires=">=3.6",
install_requires=["setuptools", "morepath >= 0.15", "Jinja2 >= 2.9"],
extras_require=dict(test=["pytest >= 7", "pytest-cov", "WebTest"]),
)
| {
"content_hash": "da2751f4a391748810c7fb16ef5c046a",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 73,
"avg_line_length": 32.916666666666664,
"alnum_prop": 0.610126582278481,
"repo_name": "morepath/more.jinja2",
"id": "b84502b1897955a3fc0c259f8518986cbb549f6c",
"size": "1185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3790"
}
],
"symlink_target": ""
} |
__all__ = ['SOAPCallbackHandler', 'SOAPHandlerChainFactory']
from turboengine.errors import TException
try:
from ZSI.twisted import wsgi
from ZSI.twisted.reverse import ReverseHandlerChain
from ZSI.twisted.reverse import DataHandler
except ImportError:
    raise TException('If you want to use web services, please install ZSI and zope.interface or put them on PYTHONPATH')
##################################################################################
## Hacking this class so that a ServiceSOAPBinding subclass works with the
## wsgi SOAPApplication and its delegate field.
##################################################################################
class SOAPCallbackHandler(wsgi.SOAPCallbackHandler):
@classmethod
def processRequest(cls, ps, **kw):
"""invokes callback that should return a (request,response) tuple.
representing the SOAP request and response respectively.
ps -- ParsedSoap instance representing HTTP Body.
request -- twisted.web.server.Request
"""
resource = kw['resource']
method = resource.getOperation(ps, None) # This getOperation method is valid for ServiceSOAPBinding subclass
rsp = method(ps, **kw)[1] # return (request, response) but we only need response
return rsp
class SOAPHandlerChainFactory(wsgi.SOAPHandlerChainFactory):
protocol = ReverseHandlerChain
@classmethod
def newInstance(cls):
return cls.protocol(DataHandler, SOAPCallbackHandler)
##################################################################################
## end Hacking
################################################################################## | {
"content_hash": "35e150491987c1ead8c858b885c9b557",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 118,
"avg_line_length": 39.27906976744186,
"alnum_prop": 0.5855535820011841,
"repo_name": "carlitux/turboengine",
"id": "7501c77a3740d6a774b878394e845f964f0a4935",
"size": "3010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/turboengine/webservices/hacks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25752"
}
],
"symlink_target": ""
} |
from django.db.models.signals import pre_delete, pre_save
from django.dispatch import receiver
from ampadb.support import signal_clean
from .models import Profile, UnregisteredUser
@receiver(pre_save, sender=Profile)
def profile_pre_save(sender, **kwargs):
signal_clean(sender, **kwargs)
@receiver(pre_delete, sender=Profile)
def profile_pre_delete(sender, **kwargs):
"""Eliminar l'usuari d'un perfil quan aquest s'elimina."""
instance = kwargs['instance']
if instance.user:
instance.user.delete()
if instance.unregisteredUser:
instance.unregisteredUser.delete()
@receiver(pre_save, sender=UnregisteredUser)
def uu_pre_save(sender, **kwargs):
signal_clean(sender, **kwargs)
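# Note (illustrative, not part of the original module): `signal_clean` is
# assumed to run the model's validation on pre_save, so saving a Profile or
# an UnregisteredUser with invalid data raises before it hits the database.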
| {
"content_hash": "45f93d6bad7c8d6faa70861d5abbe170",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 62,
"avg_line_length": 30,
"alnum_prop": 0.7347222222222223,
"repo_name": "victorg590/ampadb",
"id": "bf350a10d8b0a7d5da8f45d8b2f021a3c90077a1",
"size": "754",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "usermanager/signals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "576"
},
{
"name": "CSS",
"bytes": "1626"
},
{
"name": "HTML",
"bytes": "51067"
},
{
"name": "JavaScript",
"bytes": "101"
},
{
"name": "Python",
"bytes": "165860"
},
{
"name": "Shell",
"bytes": "631"
}
],
"symlink_target": ""
} |
import json
import os
from typing import List
from dataset.utils import fix_needless_new_line
def run(inputs: List[str], output: str):
output_abs = os.path.abspath(output)
data = []
for input_ in inputs:
input_abs = os.path.abspath(input_)
        with open(input_abs) as f_in:
            data += json.load(f_in)
for i, data_per_paper in enumerate(data):
data[i]["summary"] = fix_needless_new_line(data[i]["summary"])
with open(output_abs, "w") as f_out:
json.dump(data, f_out)
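# Usage sketch (hypothetical paths, for illustration only):
#
#   run(["summaries/part1.json", "summaries/part2.json"], "summaries/all.json")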
| {
"content_hash": "3ab9a13fafb8ef70ff4ab2bc16b439b8",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 66,
"avg_line_length": 25.88235294117647,
"alnum_prop": 0.6659090909090909,
"repo_name": "n-kats/arxiv2vec",
"id": "d6ea34cd16b1258010afa5cca973f8f29dda62d0",
"size": "440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dataset/merge_json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15549"
}
],
"symlink_target": ""
} |
"""
Blender API for querying mesh data. Animation data is also
handled here since Three.js associates the animation (skeletal,
morph targets) with the geometry nodes.
"""
import operator
import re
from bpy import data, types, context
from . import material, texture, animation
from . import object as object_
from .. import constants, utilities, logger, exceptions
def _mesh(func):
"""
:param func:
"""
def inner(name, *args, **kwargs):
"""
:param name:
:param *args:
:param **kwargs:
"""
if isinstance(name, types.Mesh):
mesh = name
else:
mesh = data.meshes[name]
return func(mesh, *args, **kwargs)
return inner
@_mesh
def skeletal_animation(mesh, options):
"""
:param mesh:
:param options:
:rtype: []
"""
logger.debug("mesh.animation(%s, %s)", mesh, options)
armature = _armature(mesh)
if not armature:
logger.warning("No armature found (%s)", mesh)
return []
anim_type = options.get(constants.ANIMATION)
# pose_position = armature.data.pose_position
dispatch = {
constants.POSE: animation.pose_animation,
constants.REST: animation.rest_animation
}
func = dispatch[anim_type]
# armature.data.pose_position = anim_type.upper()
animations = func(armature, options)
# armature.data.pose_position = pose_position
return animations
@_mesh
def bones(mesh, options):
"""
:param mesh:
:param options:
:rtype: [], {}
"""
logger.debug("mesh.bones(%s)", mesh)
armature = _armature(mesh)
if not armature:
return [], {}
anim_type = options.get(constants.ANIMATION)
# pose_position = armature.data.pose_position
if anim_type == constants.OFF:
logger.info("Animation type not set, defaulting "
"to using REST position for the armature.")
func = _rest_bones
# armature.data.pose_position = "REST"
else:
dispatch = {
constants.REST: _rest_bones,
constants.POSE: _pose_bones
}
logger.info("Using %s for the armature", anim_type)
func = dispatch[anim_type]
# armature.data.pose_position = anim_type.upper()
bones_, bone_map = func(armature)
# armature.data.pose_position = pose_position
return (bones_, bone_map)
@_mesh
def buffer_normal(mesh):
"""
:param mesh:
:rtype: []
"""
normals_ = []
for face in mesh.tessfaces:
vert_count = len(face.vertices)
        if vert_count != 3:
msg = "Non-triangulated face detected"
raise exceptions.BufferGeometryError(msg)
for vertex_index in face.vertices:
normal = mesh.vertices[vertex_index].normal
vector = (normal.x, normal.y, normal.z) if face.use_smooth else (face.normal.x, face.normal.y, face.normal.z)
normals_.extend(vector)
return normals_
@_mesh
def buffer_position(mesh):
"""
:param mesh:
:rtype: []
"""
position = []
for face in mesh.tessfaces:
vert_count = len(face.vertices)
        if vert_count != 3:
msg = "Non-triangulated face detected"
raise exceptions.BufferGeometryError(msg)
for vertex_index in face.vertices:
vertex = mesh.vertices[vertex_index]
vector = (vertex.co.x, vertex.co.y, vertex.co.z)
position.extend(vector)
return position
@_mesh
def buffer_uv(mesh, layer=0):
"""
:param mesh:
:param layer: (Default value = 0)
:rtype: []
"""
uvs_ = []
if len(mesh.uv_layers) <= layer:
return uvs_
for uv_data in mesh.uv_layers[layer].data:
uv_tuple = (uv_data.uv[0], uv_data.uv[1])
uvs_.extend(uv_tuple)
return uvs_
@_mesh
def extra_vertex_groups(mesh, patterns_string):
"""
Returns (name,index) tuples for the extra (non-skinning) vertex groups
matching the given patterns.
The patterns are comma-separated where the star character can be used
as a wildcard character sequence.
:param mesh:
:param patterns_string:
:rtype: []
"""
logger.debug("mesh._extra_vertex_groups(%s)", mesh)
pattern_re = None
extra_vgroups = []
if not patterns_string.strip():
return extra_vgroups
armature = _armature(mesh)
obj = object_.objects_using_mesh(mesh)[0]
for vgroup_index, vgroup in enumerate(obj.vertex_groups):
# Skip bone weights:
vgroup_name = vgroup.name
if armature:
is_bone_weight = False
for bone in armature.pose.bones:
if bone.name == vgroup_name:
is_bone_weight = True
break
if is_bone_weight:
continue
if pattern_re is None:
# Translate user-friendly patterns to a regular expression:
# Join the whitespace-stripped, initially comma-separated
# entries to alternatives. Escape all characters except
# the star and replace that one with '.*?'.
pattern_re = '^(?:' + '|'.join(
map(lambda entry:
'.*?'.join(map(re.escape, entry.strip().split('*'))),
patterns_string.split(','))) + ')$'
if not re.match(pattern_re, vgroup_name):
continue
extra_vgroups.append((vgroup_name, vgroup_index))
return extra_vgroups
@_mesh
def vertex_group_data(mesh, index):
"""
Return vertex group data for each vertex. Vertices not in the group
get a zero value.
:param mesh:
:param index:
"""
group_data = []
for vertex in mesh.vertices:
weight = None
for group in vertex.groups:
if group.group == index:
weight = group.weight
group_data.append(weight or 0.0)
return group_data
@_mesh
def buffer_vertex_group_data(mesh, index):
"""
Return vertex group data for each deindexed vertex. Vertices not in the
group get a zero value.
:param mesh:
:param index:
"""
group_data = []
for face in mesh.tessfaces:
for vertex_index in face.vertices:
vertex = mesh.vertices[vertex_index]
weight = None
for group in vertex.groups:
if group.group == index:
weight = group.weight
group_data.append(weight or 0.0)
return group_data
@_mesh
def faces(mesh, options, material_list=None):
"""
:param mesh:
:param options:
:param material_list: (Default value = None)
"""
logger.debug("mesh.faces(%s, %s, materials=%s)",
mesh, options, materials)
material_list = material_list or []
vertex_uv = len(mesh.uv_textures) > 0
has_colors = len(mesh.vertex_colors) > 0
logger.info("Has UVs = %s", vertex_uv)
logger.info("Has vertex colours = %s", has_colors)
opt_colours = options[constants.COLORS] and has_colors
opt_uvs = options[constants.UVS] and vertex_uv
opt_materials = options.get(constants.FACE_MATERIALS)
opt_normals = options[constants.NORMALS]
logger.debug("Vertex colours enabled = %s", opt_colours)
logger.debug("UVS enabled = %s", opt_uvs)
logger.debug("Materials enabled = %s", opt_materials)
logger.debug("Normals enabled = %s", opt_normals)
uv_indices = _uvs(mesh)[1] if opt_uvs else None
vertex_normals = _normals(mesh) if opt_normals else None
vertex_colours = vertex_colors(mesh) if opt_colours else None
faces_data = []
colour_indices = {}
if vertex_colours:
logger.debug("Indexing colours")
for index, colour in enumerate(vertex_colours):
colour_indices[str(colour)] = index
normal_indices = {}
if vertex_normals:
logger.debug("Indexing normals")
for index, normal in enumerate(vertex_normals):
normal_indices[str(normal)] = index
logger.info("Parsing %d faces", len(mesh.tessfaces))
for face in mesh.tessfaces:
vert_count = len(face.vertices)
if vert_count not in (3, 4):
logger.error("%d vertices for face %d detected",
vert_count,
face.index)
raise exceptions.NGonError("ngons are not supported")
mat_index = face.material_index is not None and opt_materials
mask = {
            constants.QUAD: vert_count == 4,
constants.MATERIALS: mat_index,
constants.UVS: False,
constants.NORMALS: False,
constants.COLORS: False
}
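        # The mask is packed into a single leading integer per face (via
        # utilities.bit_mask below); it tells the three.js JSON loader which
        # optional attribute indices follow the vertex indices.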
face_data = []
face_data.extend([v for v in face.vertices])
if mask[constants.MATERIALS]:
for mat_index, mat in enumerate(material_list):
if mat[constants.DBG_INDEX] == face.material_index:
face_data.append(mat_index)
break
else:
logger.warning("Could not map the material index "
"for face %d" % face.index)
face_data.append(0) # default to index zero if there's a bad material
if uv_indices:
for index, uv_layer in enumerate(uv_indices):
layer = mesh.tessface_uv_textures[index]
for uv_data in layer.data[face.index].uv:
uv_tuple = (uv_data[0], uv_data[1])
uv_index = uv_layer[str(uv_tuple)]
face_data.append(uv_index)
mask[constants.UVS] = True
if vertex_normals:
for vertex in face.vertices:
normal = mesh.vertices[vertex].normal
normal = (normal.x, normal.y, normal.z) if face.use_smooth else (face.normal.x, face.normal.y, face.normal.z)
face_data.append(normal_indices[str(normal)])
mask[constants.NORMALS] = True
if vertex_colours:
colours = mesh.tessface_vertex_colors.active.data[face.index]
for each in (colours.color1, colours.color2, colours.color3):
each = utilities.rgb2int(each)
face_data.append(colour_indices[str(each)])
mask[constants.COLORS] = True
if mask[constants.QUAD]:
colour = utilities.rgb2int(colours.color4)
face_data.append(colour_indices[str(colour)])
face_data.insert(0, utilities.bit_mask(mask))
faces_data.extend(face_data)
return faces_data
@_mesh
def morph_targets(mesh, options):
"""
:param mesh:
:param options:
"""
logger.debug("mesh.morph_targets(%s, %s)", mesh, options)
obj = object_.objects_using_mesh(mesh)[0]
original_frame = context.scene.frame_current
frame_step = options.get(constants.FRAME_STEP, 1)
scene_frames = range(context.scene.frame_start,
context.scene.frame_end+1,
frame_step)
morphs = []
for frame in scene_frames:
logger.info("Processing data at frame %d", frame)
context.scene.frame_set(frame, 0.0)
morphs.append([])
vertices_ = object_.extract_mesh(obj, options).vertices[:]
for vertex in vertices_:
morphs[-1].extend([vertex.co.x, vertex.co.y, vertex.co.z])
context.scene.frame_set(original_frame, 0.0)
morphs_detected = False
for index, each in enumerate(morphs):
        if index == 0:
continue
morphs_detected = morphs[index-1] != each
if morphs_detected:
logger.info("Valid morph target data detected")
break
else:
logger.info("No valid morph data detected")
return []
manifest = []
for index, morph in enumerate(morphs):
manifest.append({
constants.NAME: 'animation_%06d' % index,
constants.VERTICES: morph
})
return manifest
@_mesh
def blend_shapes(mesh, options):
"""
:param mesh:
:param options:
"""
logger.debug("mesh.blend_shapes(%s, %s)", mesh, options)
manifest = []
if mesh.shape_keys:
logger.info("mesh.blend_shapes -- there's shape keys")
key_blocks = mesh.shape_keys.key_blocks
for key in key_blocks.keys()[1:]: # skip "Basis"
logger.info("mesh.blend_shapes -- key %s", key)
morph = []
for d in key_blocks[key].data:
co = d.co
morph.append([co.x, co.y, co.z])
manifest.append({
constants.NAME: key,
constants.VERTICES: morph
})
else:
logger.debug("No valid blend_shapes detected")
return manifest
@_mesh
def animated_blend_shapes(mesh, name, options):
"""
:param mesh:
:param options:
"""
    # Filter the name to keep only the node's name; the two possible forms
    # are '%sGeometry' and '%sGeometry.%d', and we want the '%s' part.
    name = re.search(r"^(.*)Geometry(\..*)?$", name).group(1)
logger.debug("mesh.animated_blend_shapes(%s, %s)", mesh, options)
tracks = []
shp = mesh.shape_keys
animCurves = shp.animation_data
if animCurves:
animCurves = animCurves.action.fcurves
for key in shp.key_blocks.keys()[1:]: # skip "Basis"
key_name = name+".morphTargetInfluences["+key+"]"
found_animation = False
data_path = 'key_blocks["'+key+'"].value'
values = []
if animCurves:
for fcurve in animCurves:
if fcurve.data_path == data_path:
for xx in fcurve.keyframe_points:
values.append({ "time": xx.co.x, "value": xx.co.y })
found_animation = True
break # no need to continue
if found_animation:
tracks.append({
constants.NAME: key_name,
constants.TYPE: "number",
constants.KEYS: values
            })
return tracks
@_mesh
def materials(mesh, options):
"""
:param mesh:
:param options:
"""
logger.debug("mesh.materials(%s, %s)", mesh, options)
indices = []
for face in mesh.tessfaces:
if face.material_index not in indices:
indices.append(face.material_index)
material_sets = [(mesh.materials[index], index) for index in indices]
materials_ = []
maps = options.get(constants.MAPS)
mix = options.get(constants.MIX_COLORS)
use_colors = options.get(constants.COLORS)
logger.info("Colour mix is set to %s", mix)
logger.info("Vertex colours set to %s", use_colors)
for mat, index in material_sets:
        if mat is None:  # undefined material for a specific index is skipped
continue
try:
dbg_color = constants.DBG_COLORS[index]
except IndexError:
dbg_color = constants.DBG_COLORS[0]
logger.info("Compiling attributes for %s", mat.name)
attributes = {
constants.COLOR_EMISSIVE: material.emissive_color(mat),
constants.SHADING: material.shading(mat),
constants.OPACITY: material.opacity(mat),
constants.TRANSPARENT: material.transparent(mat),
constants.VISIBLE: material.visible(mat),
constants.WIREFRAME: material.wireframe(mat),
constants.BLENDING: material.blending(mat),
constants.DEPTH_TEST: material.depth_test(mat),
constants.DEPTH_WRITE: material.depth_write(mat),
constants.DBG_NAME: mat.name,
constants.DBG_COLOR: dbg_color,
constants.DBG_INDEX: index
}
if use_colors:
colors = material.use_vertex_colors(mat)
attributes[constants.VERTEX_COLORS] = colors
if (use_colors and mix) or (not use_colors):
colors = material.diffuse_color(mat)
attributes[constants.COLOR_DIFFUSE] = colors
if attributes[constants.SHADING] == constants.PHONG:
logger.info("Adding specular attributes")
attributes.update({
constants.SPECULAR_COEF: material.specular_coef(mat),
constants.COLOR_SPECULAR: material.specular_color(mat)
})
if mesh.show_double_sided:
logger.info("Double sided is on")
attributes[constants.DOUBLE_SIDED] = True
materials_.append(attributes)
if not maps:
continue
diffuse = _diffuse_map(mat)
if diffuse:
logger.info("Diffuse map found")
attributes.update(diffuse)
light = _light_map(mat)
if light:
logger.info("Light map found")
attributes.update(light)
specular = _specular_map(mat)
if specular:
logger.info("Specular map found")
attributes.update(specular)
if attributes[constants.SHADING] == constants.PHONG:
normal = _normal_map(mat)
if normal:
logger.info("Normal map found")
attributes.update(normal)
bump = _bump_map(mat)
if bump:
logger.info("Bump map found")
attributes.update(bump)
return materials_
@_mesh
def normals(mesh):
"""
:param mesh:
:rtype: []
"""
logger.debug("mesh.normals(%s)", mesh)
normal_vectors = []
for vector in _normals(mesh):
normal_vectors.extend(vector)
return normal_vectors
@_mesh
def skin_weights(mesh, bone_map, influences):
"""
:param mesh:
:param bone_map:
:param influences:
"""
logger.debug("mesh.skin_weights(%s)", mesh)
return _skinning_data(mesh, bone_map, influences, 1)
@_mesh
def skin_indices(mesh, bone_map, influences):
"""
:param mesh:
:param bone_map:
:param influences:
"""
logger.debug("mesh.skin_indices(%s)", mesh)
return _skinning_data(mesh, bone_map, influences, 0)
@_mesh
def texture_registration(mesh):
"""
:param mesh:
"""
logger.debug("mesh.texture_registration(%s)", mesh)
materials_ = mesh.materials or []
registration = {}
funcs = (
(constants.MAP_DIFFUSE, material.diffuse_map),
(constants.SPECULAR_MAP, material.specular_map),
(constants.LIGHT_MAP, material.light_map),
(constants.BUMP_MAP, material.bump_map),
(constants.NORMAL_MAP, material.normal_map)
)
def _registration(file_path, file_name):
"""
:param file_path:
:param file_name:
"""
return {
'file_path': file_path,
'file_name': file_name,
'maps': []
}
logger.info("found %d materials", len(materials_))
for mat in materials_:
for (key, func) in funcs:
tex = func(mat)
if tex is None:
continue
logger.info("%s has texture %s", key, tex.name)
file_path = texture.file_path(tex)
file_name = texture.file_name(tex)
reg = registration.setdefault(
utilities.hash(file_path),
_registration(file_path, file_name))
reg["maps"].append(key)
return registration
@_mesh
def uvs(mesh):
"""
:param mesh:
:rtype: []
"""
logger.debug("mesh.uvs(%s)", mesh)
uvs_ = []
for layer in _uvs(mesh)[0]:
uvs_.append([])
logger.info("Parsing UV layer %d", len(uvs_))
for pair in layer:
uvs_[-1].extend(pair)
return uvs_
@_mesh
def vertex_colors(mesh):
"""
:param mesh:
"""
logger.debug("mesh.vertex_colors(%s)", mesh)
vertex_colours = []
try:
vertex_colour = mesh.tessface_vertex_colors.active.data
except AttributeError:
logger.info("No vertex colours found")
return
for face in mesh.tessfaces:
colours = (vertex_colour[face.index].color1,
vertex_colour[face.index].color2,
vertex_colour[face.index].color3,
vertex_colour[face.index].color4)
for colour in colours:
colour = utilities.rgb2int((colour.r, colour.g, colour.b))
if colour not in vertex_colours:
vertex_colours.append(colour)
return vertex_colours
@_mesh
def vertices(mesh):
"""
:param mesh:
:rtype: []
"""
logger.debug("mesh.vertices(%s)", mesh)
vertices_ = []
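    # Swizzle Blender's Z-up coordinates into three.js' Y-up convention.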
for vertex in mesh.vertices:
vertices_.extend((vertex.co.x, vertex.co.z, -vertex.co.y))
return vertices_
def _normal_map(mat):
"""
:param mat:
"""
tex = material.normal_map(mat)
if tex is None:
return
logger.info("Found normal texture map %s", tex.name)
normal = {
constants.MAP_NORMAL:
texture.file_name(tex),
constants.MAP_NORMAL_FACTOR:
material.normal_scale(mat),
constants.MAP_NORMAL_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_NORMAL_WRAP: texture.wrap(tex),
constants.MAP_NORMAL_REPEAT: texture.repeat(tex)
}
return normal
def _bump_map(mat):
"""
:param mat:
"""
tex = material.bump_map(mat)
if tex is None:
return
logger.info("Found bump texture map %s", tex.name)
bump = {
constants.MAP_BUMP:
texture.file_name(tex),
constants.MAP_BUMP_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_BUMP_WRAP: texture.wrap(tex),
constants.MAP_BUMP_REPEAT: texture.repeat(tex),
constants.MAP_BUMP_SCALE:
material.bump_scale(mat),
}
return bump
def _specular_map(mat):
"""
:param mat:
"""
tex = material.specular_map(mat)
if tex is None:
return
logger.info("Found specular texture map %s", tex.name)
specular = {
constants.MAP_SPECULAR:
texture.file_name(tex),
constants.MAP_SPECULAR_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_SPECULAR_WRAP: texture.wrap(tex),
constants.MAP_SPECULAR_REPEAT: texture.repeat(tex)
}
return specular
def _light_map(mat):
"""
:param mat:
"""
tex = material.light_map(mat)
if tex is None:
return
logger.info("Found light texture map %s", tex.name)
light = {
constants.MAP_LIGHT:
texture.file_name(tex),
constants.MAP_LIGHT_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_LIGHT_WRAP: texture.wrap(tex),
constants.MAP_LIGHT_REPEAT: texture.repeat(tex)
}
return light
def _diffuse_map(mat):
"""
:param mat:
"""
tex = material.diffuse_map(mat)
if tex is None:
return
logger.info("Found diffuse texture map %s", tex.name)
diffuse = {
constants.MAP_DIFFUSE:
texture.file_name(tex),
constants.MAP_DIFFUSE_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_DIFFUSE_WRAP: texture.wrap(tex),
constants.MAP_DIFFUSE_REPEAT: texture.repeat(tex)
}
return diffuse
def _normals(mesh):
"""
:param mesh:
:rtype: []
"""
vectors = []
vectors_ = {}
for face in mesh.tessfaces:
for vertex_index in face.vertices:
normal = mesh.vertices[vertex_index].normal
vector = (normal.x, normal.y, normal.z) if face.use_smooth else (face.normal.x, face.normal.y, face.normal.z)
str_vec = str(vector)
try:
vectors_[str_vec]
except KeyError:
vectors.append(vector)
vectors_[str_vec] = True
return vectors
def _uvs(mesh):
"""
:param mesh:
:rtype: [[], ...], [{}, ...]
"""
uv_layers = []
uv_indices = []
for layer in mesh.uv_layers:
uv_layers.append([])
uv_indices.append({})
index = 0
for uv_data in layer.data:
uv_tuple = (uv_data.uv[0], uv_data.uv[1])
uv_key = str(uv_tuple)
try:
uv_indices[-1][uv_key]
except KeyError:
uv_indices[-1][uv_key] = index
uv_layers[-1].append(uv_tuple)
index += 1
return uv_layers, uv_indices
def _armature(mesh):
"""
:param mesh:
"""
obj = object_.objects_using_mesh(mesh)[0]
armature = obj.find_armature()
if armature:
logger.info("Found armature %s for %s", armature.name, obj.name)
else:
logger.info("Found no armature for %s", obj.name)
return armature
def _skinning_data(mesh, bone_map, influences, array_index):
"""
:param mesh:
:param bone_map:
:param influences:
:param array_index:
"""
armature = _armature(mesh)
manifest = []
if not armature:
return manifest
obj = object_.objects_using_mesh(mesh)[0]
logger.debug("Skinned object found %s", obj.name)
for vertex in mesh.vertices:
bone_array = []
for group in vertex.groups:
bone_array.append((group.group, group.weight))
bone_array.sort(key=operator.itemgetter(1), reverse=True)
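        # Keep only the strongest `influences` weights per vertex; vertices
        # with fewer groups than that are padded with zeros below.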
for index in range(influences):
if index >= len(bone_array):
manifest.append(0)
continue
name = obj.vertex_groups[bone_array[index][0]].name
for bone_index, bone in enumerate(armature.pose.bones):
if bone.name != name:
continue
                if array_index == 0:
entry = bone_map.get(bone_index, -1)
else:
entry = bone_array[index][1]
manifest.append(entry)
break
else:
manifest.append(0)
return manifest
def _pose_bones(armature):
"""
:param armature:
:rtype: [], {}
"""
bones_ = []
bone_map = {}
bone_count = 0
armature_matrix = armature.matrix_world
for bone_count, pose_bone in enumerate(armature.pose.bones):
armature_bone = pose_bone.bone
bone_index = None
if armature_bone.parent is None:
bone_matrix = armature_matrix * armature_bone.matrix_local
bone_index = -1
else:
parent_bone = armature_bone.parent
parent_matrix = armature_matrix * parent_bone.matrix_local
bone_matrix = armature_matrix * armature_bone.matrix_local
bone_matrix = parent_matrix.inverted() * bone_matrix
bone_index = index = 0
for pose_parent in armature.pose.bones:
armature_parent = pose_parent.bone.name
if armature_parent == parent_bone.name:
bone_index = index
index += 1
bone_map[bone_count] = bone_count
pos, rot, scl = bone_matrix.decompose()
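        # Swizzle the decomposed transform from Blender's Z-up into
        # three.js' Y-up coordinate system.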
bones_.append({
constants.PARENT: bone_index,
constants.NAME: armature_bone.name,
constants.POS: (pos.x, pos.z, -pos.y),
constants.ROTQ: (rot.x, rot.z, -rot.y, rot.w),
constants.SCL: (scl.x, scl.z, scl.y)
})
return bones_, bone_map
def _rest_bones(armature):
"""
:param armature:
:rtype: [], {}
"""
bones_ = []
bone_map = {}
bone_count = 0
bone_index_rel = 0
for bone in armature.data.bones:
logger.info("Parsing bone %s", bone.name)
if not bone.use_deform:
logger.debug("Ignoring bone %s at: %d",
bone.name, bone_index_rel)
continue
if bone.parent is None:
bone_pos = bone.head_local
bone_index = -1
else:
bone_pos = bone.head_local - bone.parent.head_local
bone_index = 0
index = 0
for parent in armature.data.bones:
if parent.name == bone.parent.name:
bone_index = bone_map.get(index)
index += 1
bone_world_pos = armature.matrix_world * bone_pos
x_axis = bone_world_pos.x
y_axis = bone_world_pos.z
z_axis = -bone_world_pos.y
logger.debug("Adding bone %s at: %s, %s",
bone.name, bone_index, bone_index_rel)
bone_map[bone_count] = bone_index_rel
bone_index_rel += 1
# @TODO: the rotq probably should not have these
# hard coded values
bones_.append({
constants.PARENT: bone_index,
constants.NAME: bone.name,
constants.POS: (x_axis, y_axis, z_axis),
constants.ROTQ: (0, 0, 0, 1)
})
bone_count += 1
return (bones_, bone_map)
| {
"content_hash": "e3aaf872166e704046d5be2cc9cfd4e5",
"timestamp": "",
"source": "github",
"line_count": 1101,
"max_line_length": 125,
"avg_line_length": 26.24704813805631,
"alnum_prop": 0.5636030175098623,
"repo_name": "MasterJames/three.js",
"id": "1f46962634022dc5546076d4c3d205e17b31a7f1",
"size": "28898",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "utils/exporters/blender/addons/io_three/exporter/api/mesh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1131"
},
{
"name": "C",
"bytes": "80088"
},
{
"name": "C++",
"bytes": "116991"
},
{
"name": "CSS",
"bytes": "21422"
},
{
"name": "GLSL",
"bytes": "84118"
},
{
"name": "HTML",
"bytes": "33829"
},
{
"name": "JavaScript",
"bytes": "3678816"
},
{
"name": "MAXScript",
"bytes": "75494"
},
{
"name": "Python",
"bytes": "510300"
},
{
"name": "Shell",
"bytes": "10183"
}
],
"symlink_target": ""
} |
import numpy as np
__all__ = ["points_normals_from" ,"ply_from_array"]
def points_normals_from(filename):
array = np.genfromtxt(filename)
return array[:,0:3], array[:,3:6]
def ply_from_array(points, faces, output_file):
num_points = len(points)
num_triangles = len(faces)
header = """ply
format ascii 1.0
element vertex {0}
property float x
property float y
property float z
element face {1}
property list uchar int vertex_indices
end_header\n""".format(num_points, num_triangles)
with open(output_file,'wb') as f:
f.write(header.encode())
        for item in points:
f.write("{0:0.6f} {1:0.6f} {2:0.6f}\n".format(item[0],item[1], item[2]).encode())
for item in faces:
number = len(item)
row = "{0}".format(number)
for elem in item:
row += " {0} ".format(elem)
row += "\n"
f.write(row.encode())
| {
"content_hash": "001ba3a58fa86f733af264a14f9d29e2",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 93,
"avg_line_length": 23.875,
"alnum_prop": 0.5832460732984294,
"repo_name": "mmolero/pypoisson",
"id": "4c6ce468313613ba8a2c1fcf1284109ffad5f9dc",
"size": "955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/ply_from_array.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9122"
}
],
"symlink_target": ""
} |
'''
baseline:
after: true
before: false
counts: 30
detector: H1
mass: 39.59
default_fits: nominal
multicollect:
counts: 60
detector: H1
isotope: Ar40
peakcenter:
after: false
before: false
detector: H1
isotope: Ar40
equilibration:
inlet: R
outlet: O
inlet_delay: 3
eqtime: 20
use_extraction_eqtime: True
whiff:
eqtime: 10
counts: 10
abbreviated_count_ratio: 0.25
conditionals:
- action: run
attr: Ar40
teststr: Ar40>10
- action: run_remainder
teststr: Ar40<=10
attr: Ar40
'''
ACTIVE_DETECTORS=('H2','H1','AX','L1','L2', 'CDD')
#FITS=('Ar41:linear','Ar40:linear', 'Ar39:parabolic','Ar38:parabolic','Ar37:parabolic','Ar36:parabolic')
def main():
#display information with info(msg)
info('unknown measurement script')
if mx.peakcenter.before:
peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)
    #open a plot panel for these detectors
activate_detectors(*ACTIVE_DETECTORS)
if mx.baseline.before:
baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector)
#position mass spectrometer
position_magnet(mx.multicollect.isotope, detector=mx.multicollect.detector)
#gas is staged behind inlet
meqtime = mx.whiff.eqtime
equil(meqtime, False)
result = whiff(ncounts=mx.whiff.counts, conditionals=mx.whiff.conditionals)
info('Whiff result={}'.format(result))
    wab = 1.0
    if result == 'run':
        info('Continuing whiff measurement')
        post_equilibration()
        wab = mx.whiff.abbreviated_count_ratio
    elif result == 'run_remainder':
        info('Measuring remainder instead')
reset_measurement(ACTIVE_DETECTORS)
#pump out spectrometer
#open(mx.equilibration.outlet)
#sleep(15)
#open co2 chamber
open('T')
#equilibrate with entire section
equil(eqtime)
multicollect(ncounts=mx.multicollect.counts*wab, integration_time=1)
if mx.baseline.after:
baselines(ncounts=mx.baseline.counts*wab, mass=mx.baseline.mass, detector=mx.baseline.detector)
if mx.peakcenter.after:
peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)
info('finished measure script')
def equil(eqt, do_post=True):
#post equilibration script triggered after eqtime elapsed
#equilibrate is non blocking
    #so use either a sniff or sleep as a placeholder until eq finished
equilibrate(eqtime=eqt, do_post_equilibration=do_post,
inlet=mx.equilibration.inlet, outlet=mx.equilibration.outlet)
#equilibrate returns immediately after the inlet opens
set_time_zero(0)
sniff(eqt)
#set default regression
set_fits()
set_baseline_fits()
#========================EOF==============================================================
#peak_hop(detector='CDD', isotopes=['Ar40','Ar39','Ar36'], cycles=2, integrations=3)
#baselines(counts=50, mass=0.5, detector='CDD')
#isolate sniffer volume
# close('S')
# sleep(1)
#
# #open to mass spec
# open('R')
#
# set_time_zero()
# #display pressure wave
# sniff(5)
#
# #define sniff/split threshold
# sniff_threshold=100
#
# #test condition
# #if get_intensity('H1')>sniff_threshold:
# if True:
# gosub('splits:jan_split', klass='ExtractionLinePyScript')
#
| {
"content_hash": "ac098204df1247e8f908a61dda6cb827",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 104,
"avg_line_length": 26.565891472868216,
"alnum_prop": 0.648964108549752,
"repo_name": "NMGRL/pychron",
"id": "bea12508409bb97a0caf8e91d5f2b1515bdada66",
"size": "3441",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "docs/user_guide/operation/scripts/examples/argus/measurement/jan_co2_whiff_measure.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
} |
from ._operations import AppComponentOperations
from ._operations import ServerMetricsOperations
from ._operations import TestOperations
from ._operations import TestRunOperations
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"AppComponentOperations",
"ServerMetricsOperations",
"TestOperations",
"TestRunOperations",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
| {
"content_hash": "c0c2cad87f735947edafc103fe0a8c34",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 78,
"avg_line_length": 31.88235294117647,
"alnum_prop": 0.7324723247232472,
"repo_name": "Azure/azure-sdk-for-python",
"id": "b602fd14d33620451908ff87afc39fc73d8f13a1",
"size": "1010",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "sdk/loadtestservice/azure-developer-loadtesting/azure/developer/loadtesting/operations/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import base64
import hashlib
import pickle
import time
import uuid
from abc import ABCMeta, abstractmethod
from collections import defaultdict
_MetaDatabase = ABCMeta('_MetaDatabase', (object,), {})
class ABCDatabase(_MetaDatabase):
class Conflict(Exception):
"""Raises in case of conflict updates"""
class NotFound(Exception):
"""Raises in case attempt to query on missed document"""
def __init__(self, name):
self._name = name
self._start_time = int(time.time() * 10**6)
self._update_seq = 0
@property
def name(self):
"""Returns database symbolic name as string"""
return self._name
@property
def start_time(self):
"""Returns database start time in microseconds"""
return self._start_time
@property
def update_seq(self):
"""Returns current update sequence value"""
return self._update_seq
def info(self):
"""Returns database information object as dict"""
return {
'db_name': self.name,
'instance_start_time': str(self.start_time),
'update_seq': self.update_seq
}
@abstractmethod
def contains(self, idx, rev=None):
"""Verifies that document with specified idx exists"""
@abstractmethod
def check_for_conflicts(self, idx, rev):
"""Check that specified idx and rev provides no conflicts
or raises Conflict exception otherwise"""
@abstractmethod
def load(self, idx, rev=None):
"""Returns document by specified idx"""
@abstractmethod
def store(self, doc, rev=None):
"""Creates document or updates if rev specified"""
@abstractmethod
def remove(self, idx, rev):
"""Removes document by specified idx and rev"""
@abstractmethod
def revs_diff(self, idrevs):
"""Returns missed revisions for specified id - revs mapping"""
@abstractmethod
def bulk_docs(self, docs, new_edits=True):
"""Bulk update docs"""
@abstractmethod
def ensure_full_commit(self):
"""Ensures that all changes are actually stored on disk"""
@abstractmethod
def changes(self, since=0, feed='normal', style='all_docs', filter=None):
"""Ensures that all changes are actually stored on disk"""
@abstractmethod
def add_attachment(self, doc, name, data, ctype='application/octet-stream'):
"""Adds attachment to specified document"""
class MemoryDatabase(ABCDatabase):
def __init__(self, *args, **kwargs):
super(MemoryDatabase, self).__init__(*args, **kwargs)
self._docs = {}
self._changes = {}
def _new_rev(self, doc):
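        # Revision strings follow a CouchDB-style "<generation>-<hash>" scheme;
        # this in-memory backend derives the hash from an md5 of the pickled doc.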
oldrev = doc.get('_rev')
if oldrev is None:
seq, _ = 0, None
else:
seq, _ = oldrev.split('-', 1)
seq = int(seq)
sig = hashlib.md5(pickle.dumps(doc)).hexdigest()
newrev = '%d-%s' % (seq + 1, sig)
return newrev.lower()
def check_for_conflicts(self, idx, rev):
if self.contains(idx):
if rev is None:
if idx.startswith('_local/'):
return
raise self.Conflict('Document update conflict')
elif not self.contains(idx, rev):
raise self.Conflict('Document update conflict')
elif rev is not None:
raise self.Conflict('Document update conflict')
def contains(self, idx, rev=None):
if idx not in self._docs:
return False
doc = self._docs[idx]
if rev is None:
return not doc.get('_deleted', False)
return self._docs[idx]['_rev'] == rev
def load(self, idx, rev=None):
if not self.contains(idx, rev):
raise self.NotFound(idx)
return self._docs[idx]
def store(self, doc, rev=None, new_edits=True):
if '_id' not in doc:
doc['_id'] = str(uuid.uuid4()).lower()
if rev is None:
rev = doc.get('_rev')
idx = doc['_id']
if new_edits:
self.check_for_conflicts(idx, rev)
doc['_rev'] = self._new_rev(doc)
else:
            assert rev, 'Document revision missing'
doc['_rev'] = rev
idx, rev = doc['_id'], doc['_rev']
self._docs[idx] = doc
self._update_seq += 1
self._changes[idx] = self._update_seq
return idx, rev
def remove(self, idx, rev):
if not self.contains(idx):
raise self.NotFound(idx)
elif not self.contains(idx, rev):
raise self.Conflict('Document update conflict')
doc = {
'_id': idx,
'_rev': rev,
'_deleted': True
}
return self.store(doc, rev)
def revs_diff(self, idrevs):
res = defaultdict(dict)
for idx, revs in idrevs.items():
missing = []
if not self.contains(idx):
missing.extend(revs)
res[idx]['missing'] = missing
continue
doc = self._docs[idx]
for rev in revs:
if doc['_rev'] != rev:
missing.append(rev)
if missing:
res[idx]['missing'] = missing
return res
def bulk_docs(self, docs, new_edits=True):
res = []
for doc in docs:
try:
idx, rev = self.store(doc, None, new_edits)
res.append({
'ok': True,
'id': idx,
'rev': rev
})
except Exception as err:
res.append({'id': doc.get('_id'),
'error': type(err).__name__,
'reason': str(err)})
return res
def ensure_full_commit(self):
return {
'ok': True,
'instance_start_time': self.info()['instance_start_time']
}
    def changes(self, since=0, feed='normal', style='all_docs', filter=None):
        changes = sorted(self._changes.items(), key=lambda i: i[1])
        for idx, seq in changes:
            # only report events at or after the requested sequence
            if since and seq < since:
                continue
            yield self.make_event(idx, seq)
def add_attachment(self, doc, name, data, ctype='application/octet-stream'):
        atts = doc.setdefault('_attachments', {})
digest = 'md5-%s' % base64.b64encode(hashlib.md5(data).digest()).decode()
if doc.get('_rev'):
revpos = int(doc['_rev'].split('-')[0]) + 1
else:
revpos = 1
atts[name] = {
'data': data,
'digest': digest,
'length': len(data),
'content_type': ctype,
'revpos': revpos
}
def make_event(self, idx, seq):
doc = self._docs[idx]
event = {
'id': idx,
'changes': [{'rev': doc['_rev']}],
'seq': seq
}
if doc.get('_deleted'):
event['_deleted'] = True
return event
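
# Minimal usage sketch for the in-memory backend above (illustrative only;
# the document id and field names are made up for demonstration):
#
#   db = MemoryDatabase('example')
#   idx, rev = db.store({'_id': 'doc1', 'value': 42})
#   doc = db.load(idx)
#   missing = db.revs_diff({'doc1': [rev]})
#   db.remove(idx, doc['_rev'])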
| {
"content_hash": "89637f59202274f7a11db1dbf57307cd",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 81,
"avg_line_length": 30.008403361344538,
"alnum_prop": 0.5278633436012321,
"repo_name": "kxepal/replipy",
"id": "364aee2a6924ff49c6be029bb6cd99cf89b11b22",
"size": "7359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "replipy/storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28205"
}
],
"symlink_target": ""
} |
from ..libs.view_helpers import *
from ..libs import log
from .base_command import TypeScriptBaseTextCommand
class TypescriptFormatOnKey(TypeScriptBaseTextCommand):
"""
    Format on ";", "}", or "\n"; called by typing these keys in a ts file.
    In the case of "\n", this is only called when no completion dialogue is visible.
"""
def run(self, text, key="", insert_key=True):
log.debug("running TypescriptFormatOnKey")
if 0 == len(key):
return
check_update_view(self.view)
format_response = cli.service.format_on_key(self.view.file_name(), get_location_from_view(self.view), key)
if format_response["success"]:
# logger.log.debug(str(formatResp))
code_edits = format_response["body"]
apply_formatting_changes(text, self.view, code_edits)
class TypescriptFormatSelection(TypeScriptBaseTextCommand):
"""Command to format the current selection"""
def run(self, text):
log.debug("running TypescriptFormatSelection")
r = self.view.sel()[0]
format_range(text, self.view, r.begin(), r.end())
class TypescriptFormatDocument(TypeScriptBaseTextCommand):
"""Command to format the entire buffer"""
def run(self, text):
log.debug("running TypescriptFormatDocument")
format_range(text, self.view, 0, self.view.size())
class TypescriptFormatLine(TypeScriptBaseTextCommand):
"""Command to format the current line"""
def run(self, text):
log.debug("running TypescriptFormatLine")
line_region = self.view.line(self.view.sel()[0])
line_text = self.view.substr(line_region)
if NON_BLANK_LINE_PATTERN.search(line_text):
format_range(text, self.view, line_region.begin(), line_region.end())
else:
position = self.view.sel()[0].begin()
line, offset = self.view.rowcol(position)
if line > 0:
self.view.run_command('typescript_format_on_key', {"key": "\n", "insert_key": False})
class TypescriptFormatBrackets(TypeScriptBaseTextCommand):
def run(self, text):
log.debug("running TypescriptFormatBrackets")
check_update_view(self.view)
sel = self.view.sel()
if len(sel) == 1:
original_pos = sel[0].begin()
bracket_char = self.view.substr(original_pos)
if bracket_char != "}":
self.view.run_command('move_to', {"to": "brackets"})
bracket_pos = self.view.sel()[0].begin()
bracket_char = self.view.substr(bracket_pos)
if bracket_char == "}":
self.view.run_command('move', {"by": "characters", "forward": True})
self.view.run_command('typescript_format_on_key', {"key": "}", "insert_key": False})
self.view.run_command('move', {"by": "characters", "forward": True})
class TypescriptPasteAndFormat(TypeScriptBaseTextCommand):
def run(self, text):
log.debug("running TypescriptPasteAndFormat")
view = self.view
check_update_view(view)
regions_before_paste = regions_to_static_regions(view.sel())
if IS_ST2:
view.add_regions("apresPaste", copy_regions(view.sel()), "", "", sublime.HIDDEN)
else:
view.add_regions("apresPaste", copy_regions(view.sel()), flags=sublime.HIDDEN)
view.run_command("paste")
regions_after_paste = view.get_regions("apresPaste")
view.erase_regions("apresPaste")
for rb, ra in zip(regions_before_paste, regions_after_paste):
line_start = view.line(rb.begin()).begin()
line_end = view.line(ra.begin()).end()
format_range(text, view, line_start, line_end)
class TypescriptAutoIndentOnEnterBetweenCurlyBrackets(TypeScriptBaseTextCommand):
"""
Handle the case of hitting enter between {} to auto indent and format
"""
def run(self, text):
log.debug("running TypescriptAutoIndentOnEnterBetweenCurlyBrackets")
view = self.view
view.run_command('typescript_format_on_key', {"key": "\n"})
loc = view.sel()[0].begin()
row, offset = view.rowcol(loc)
tab_size = view.settings().get('tab_size')
brace_offset = offset
ws = ""
for i in range(tab_size):
ws += ' '
ws += "\n"
for i in range(brace_offset):
ws += ' '
# insert the whitespace
insert_text(view, text, loc, ws)
        set_caret_pos(view, loc + tab_size)
| {
"content_hash": "9744e4cf2fc88f65f9d7c28cd123ae70",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 114,
"avg_line_length": 39.85964912280702,
"alnum_prop": 0.6126760563380281,
"repo_name": "hoanhtien/TypeScript-Sublime-Plugin",
"id": "9f03a76b269039955525df3d39e4bb39cbe3d553",
"size": "4544",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "typescript/commands/format.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "497"
},
{
"name": "JavaScript",
"bytes": "3742850"
},
{
"name": "Python",
"bytes": "156111"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import imp
import optparse
import os
import pipes
ycm_module_path = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../vim/chromium.ycm_extra_conf.py'))
ycm_extra_conf = imp.load_source('ycm_extra_conf', ycm_module_path)
def main():
usage = "usage: %prog [options] file"
parser = optparse.OptionParser(usage)
parser.add_option("-d", "--depot_tools", dest="depot_path",
help="path to depot_tools")
(options, args) = parser.parse_args()
if options.depot_path:
os.environ["PATH"] += ":%s" % options.depot_path
if len(args) != 1:
parser.error("incorrect number of arguments")
path = os.path.realpath(args[0])
results = ycm_extra_conf.FlagsForFile(path)
for flag in results['flags']:
# The sublimeclang plugin expects to parse its input with shlex.
# Defines and include path names may have spaces or quotes.
print(pipes.quote(flag))
if __name__ == "__main__":
main()
| {
"content_hash": "519142e89e157de3768006617833f873",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 68,
"avg_line_length": 29.470588235294116,
"alnum_prop": 0.6656686626746507,
"repo_name": "ric2b/Vivaldi-browser",
"id": "6912b6f3c0fc40bad80b2648c478fbd47160c37c",
"size": "1620",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chromium/tools/sublime/ninja_options_script.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
App Engine config
"""
"""
def gae_mini_profiler_should_profile_production():
# Uncomment the first two lines to enable GAE Mini Profiler on production for admin accounts
# from google.appengine.api import users
# return users.is_current_user_admin()
return False
def webapp_add_wsgi_middleware(app):
from google.appengine.ext.appstats import recording
app = recording.appstats_wsgi_middleware(app)
return app
"""
| {
"content_hash": "ef5da14608100c66ceba3be4a92c269d",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 96,
"avg_line_length": 26.352941176470587,
"alnum_prop": 0.7254464285714286,
"repo_name": "expl0si0nn/cf-tracker",
"id": "193ebb0f68d043e54b7eb1fc01d15a64765428c0",
"size": "448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/appengine_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8456"
},
{
"name": "HTML",
"bytes": "5091"
},
{
"name": "JavaScript",
"bytes": "6345"
},
{
"name": "Python",
"bytes": "940105"
}
],
"symlink_target": ""
} |
import warnings
import numpy as np
from _felzenszwalb_cy import _felzenszwalb_grey
def felzenszwalb(image, scale=1, sigma=0.8, min_size=20):
"""Computes Felsenszwalb's efficient graph based image segmentation.
Produces an oversegmentation of a multichannel (i.e. RGB) image
using a fast, minimum spanning tree based clustering on the image grid.
The parameter ``scale`` sets an observation level. Higher scale means
less and larger segments. ``sigma`` is the diameter of a Gaussian kernel,
used for smoothing the image prior to segmentation.
The number of produced segments as well as their size can only be
controlled indirectly through ``scale``. Segment size within an image can
vary greatly depending on local contrast.
For RGB images, the algorithm computes a separate segmentation for each
channel and then combines these. The combined segmentation is the
intersection of the separate segmentations on the color channels.
Parameters
----------
image : (width, height, 3) or (width, height) ndarray
Input image.
scale : float
Free parameter. Higher means larger clusters.
sigma : float
Width of Gaussian kernel used in preprocessing.
min_size : int
Minimum component size. Enforced using postprocessing.
Returns
-------
segment_mask : (width, height) ndarray
Integer mask indicating segment labels.
References
----------
.. [1] Efficient graph-based image segmentation, Felzenszwalb, P.F. and
Huttenlocher, D.P. International Journal of Computer Vision, 2004
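
    Examples
    --------
    Illustrative call on random data (not a real image; the label values
    depend on the input):

    >>> import numpy as np
    >>> img = np.random.rand(64, 64, 3)
    >>> labels = felzenszwalb(img, scale=3.0, sigma=0.95, min_size=5)
    >>> labels.shape == img.shape[:2]
    True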
"""
#image = img_as_float(image)
if image.ndim == 2:
# assume single channel image
return _felzenszwalb_grey(image, scale=scale, sigma=sigma)
elif image.ndim != 3:
raise ValueError("Felzenswalb segmentation can only operate on RGB and"
" grey images, but input array of ndim %d given."
% image.ndim)
# assume we got 2d image with multiple channels
n_channels = image.shape[2]
if n_channels != 3:
warnings.warn("Got image with %d channels. Is that really what you"
" wanted?" % image.shape[2])
segmentations = []
    # compute felzenszwalb segmentation for each channel
for c in range(n_channels):
channel = np.ascontiguousarray(image[:, :, c])
s = _felzenszwalb_grey(channel, scale=scale, sigma=sigma,
min_size=min_size)
segmentations.append(s)
# put pixels in same segment only if in the same segment in all images
# we do this by combining the channels to one number
n0 = segmentations[0].max() + 1
n1 = segmentations[1].max() + 1
segmentation = (segmentations[0] + segmentations[1] * n0
+ segmentations[2] * n0 * n1)
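    # e.g. a pixel with per-channel labels (a, b, c) gets the combined value
    # a + b*n0 + c*n0*n1, which is unique for each distinct (a, b, c) triple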
# make segment labels consecutive numbers starting at 0
labels = np.unique(segmentation, return_inverse=True)[1]
return labels.reshape(image.shape[:2])
| {
"content_hash": "3c98b9e80948982155d808b919c43a82",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 79,
"avg_line_length": 39.33766233766234,
"alnum_prop": 0.6599537801254539,
"repo_name": "fgregg/felzenszwalb",
"id": "a3a692cf6892cb72bdf1e096807dee0470c7dea4",
"size": "3029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "felzenszwalb/_felzenszwalb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "28799"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^$', TemplateView.as_view(template_name="index.html"), name='index'),
(r'^accounts/', include('allauth.urls')),
)
# urlpatterns += patterns('django.contrib.auth.views',
# # url(r'^login/$',
# # 'login',
# # {'template_name': 'users/login.html', },
# # name='login'
# # ),
# # url(r'^logout/$',
# # 'logout',
# # {'template_name': 'users/login.html'},
# # name='logout'
# # ),
# )
| {
"content_hash": "6384bf23670ccd3b656a4dd2165c790d",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 22.64516129032258,
"alnum_prop": 0.5641025641025641,
"repo_name": "arruda/sauron",
"id": "efa21a74c00c8db6198fba325dea5640e08546ce",
"size": "702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sauron/sauron/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Puppet",
"bytes": "131877"
},
{
"name": "Python",
"bytes": "18170"
},
{
"name": "Ruby",
"bytes": "344643"
},
{
"name": "Shell",
"bytes": "9019"
}
],
"symlink_target": ""
} |
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
mode:
required: false
default: null
choices: []
description:
- mode the file or directory should be, such as 0644 as would be fed to I(chmod). As of version 1.8, the mode may be specified as a symbolic mode (for example, C(u+rwx) or C(u=rw,g=r,o=r)).
owner:
required: false
default: null
choices: []
description:
- name of the user that should own the file/directory, as would be fed to I(chown)
group:
required: false
default: null
choices: []
description:
- name of the group that should own the file/directory, as would be fed to I(chown)
seuser:
required: false
default: null
choices: []
description:
- user part of SELinux file context. Will default to system policy, if
applicable. If set to C(_default), it will use the C(user) portion of the
policy if available
serole:
required: false
default: null
choices: []
description:
- role part of SELinux file context, C(_default) feature works as for I(seuser).
setype:
required: false
default: null
choices: []
description:
- type part of SELinux file context, C(_default) feature works as for I(seuser).
selevel:
required: false
default: "s0"
choices: []
description:
- level part of the SELinux file context. This is the MLS/MCS attribute,
sometimes known as the C(range). C(_default) feature works as for
I(seuser).
follow:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.8"
description:
- 'This flag indicates that filesystem links, if they exist, should be followed.'
"""
| {
"content_hash": "a85830cbb9132a0897fd5daa53f8a22d",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 195,
"avg_line_length": 29.916666666666668,
"alnum_prop": 0.6456824512534819,
"repo_name": "marcusramberg/dotfiles",
"id": "adff1f2f1bf2a878abfebe94e736c50872b61455",
"size": "2491",
"binary": false,
"copies": "49",
"ref": "refs/heads/main",
"path": "bin/.venv-ansible-venv/lib/python2.6/site-packages/ansible/utils/module_docs_fragments/files.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "4704"
},
{
"name": "Emacs Lisp",
"bytes": "66056"
},
{
"name": "JavaScript",
"bytes": "11846"
},
{
"name": "Jinja",
"bytes": "285"
},
{
"name": "Lua",
"bytes": "136578"
},
{
"name": "Nix",
"bytes": "9136"
},
{
"name": "Perl",
"bytes": "8914"
},
{
"name": "PowerShell",
"bytes": "51840"
},
{
"name": "Python",
"bytes": "9699218"
},
{
"name": "Ruby",
"bytes": "24218"
},
{
"name": "Shell",
"bytes": "416759"
},
{
"name": "Vim Script",
"bytes": "4033"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: tower
plugin_type: inventory
author:
- Matthew Jones (@matburt)
- Yunfan Zhang (@YunfanZhang42)
short_description: Ansible dynamic inventory plugin for Ansible Tower.
description:
- Reads inventories from Ansible Tower.
- Supports reading configuration from both YAML config file and environment variables.
- If reading from the YAML file, the file name must end with tower.(yml|yaml) or tower_inventory.(yml|yaml),
the path in the command would be /path/to/tower_inventory.(yml|yaml). If some arguments in the config file
are missing, this plugin will try to fill in missing arguments by reading from environment variables.
- If reading configurations from environment variables, the path in the command must be @tower_inventory.
extends_documentation_fragment: awx.awx.auth_plugin
options:
inventory_id:
description:
- The ID of the Ansible Tower inventory that you wish to import.
- This is allowed to be either the inventory primary key or its named URL slug.
- Primary key values will be accepted as strings or integers, and URL slugs must be strings.
- Named URL slugs follow the syntax of "inventory_name++organization_name".
type: raw
env:
- name: TOWER_INVENTORY
required: True
include_metadata:
description: Make extra requests to provide all group vars with metadata about the source Ansible Tower host.
type: bool
default: False
'''
EXAMPLES = '''
# Before you execute the following commands, you should make sure this file is in your plugin path,
# and you enabled this plugin.
# Example for using tower_inventory.yml file
plugin: awx.awx.tower
host: your_ansible_tower_server_network_address
username: your_ansible_tower_username
password: your_ansible_tower_password
inventory_id: the_ID_of_targeted_ansible_tower_inventory
# Then you can run the following command.
# If some of the arguments are missing, Ansible will attempt to read them from environment variables.
# ansible-inventory -i /path/to/tower_inventory.yml --list
# Example for reading from environment variables:
# Set environment variables:
# export TOWER_HOST=YOUR_TOWER_HOST_ADDRESS
# export TOWER_USERNAME=YOUR_TOWER_USERNAME
# export TOWER_PASSWORD=YOUR_TOWER_PASSWORD
# export TOWER_INVENTORY=THE_ID_OF_TARGETED_INVENTORY
# Read the inventory specified in TOWER_INVENTORY from Ansible Tower, and list them.
# The inventory path must always be @tower_inventory if you are reading all settings from environment variables.
# ansible-inventory -i @tower_inventory --list
'''
import os
from ansible.module_utils import six
from ansible.module_utils._text import to_text, to_native
from ansible.errors import AnsibleParserError, AnsibleOptionsError
from ansible.plugins.inventory import BaseInventoryPlugin
from ansible.config.manager import ensure_type
from ..module_utils.tower_api import TowerAPIModule
def handle_error(**kwargs):
raise AnsibleParserError(to_native(kwargs.get('msg')))
class InventoryModule(BaseInventoryPlugin):
NAME = 'awx.awx.tower' # REPLACE
# Stays backward compatible with tower inventory script.
# If the user supplies '@tower_inventory' as path, the plugin will read from environment variables.
no_config_file_supplied = False
def verify_file(self, path):
if path.endswith('@tower_inventory'):
self.no_config_file_supplied = True
return True
elif super(InventoryModule, self).verify_file(path):
return path.endswith(('tower_inventory.yml', 'tower_inventory.yaml', 'tower.yml', 'tower.yaml'))
else:
return False
def warn_callback(self, warning):
self.display.warning(warning)
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
if not self.no_config_file_supplied and os.path.isfile(path):
self._read_config_data(path)
# Defer processing of params to logic shared with the modules
module_params = {}
for plugin_param, module_param in TowerAPIModule.short_params.items():
opt_val = self.get_option(plugin_param)
if opt_val is not None:
module_params[module_param] = opt_val
module = TowerAPIModule(
argument_spec={}, direct_params=module_params,
error_callback=handle_error, warn_callback=self.warn_callback
)
# validate type of inventory_id because we allow two types as special case
inventory_id = self.get_option('inventory_id')
if isinstance(inventory_id, int):
inventory_id = to_text(inventory_id, nonstring='simplerepr')
else:
try:
inventory_id = ensure_type(inventory_id, 'str')
except ValueError as e:
raise AnsibleOptionsError(
'Invalid type for configuration option inventory_id, '
'not integer, and cannot convert to string: {err}'.format(err=to_native(e))
)
inventory_id = inventory_id.replace('/', '')
inventory_url = '/api/v2/inventories/{inv_id}/script/'.format(inv_id=inventory_id)
inventory = module.get_endpoint(
inventory_url, data={'hostvars': '1', 'towervars': '1', 'all': '1'}
)['json']
# To start with, create all the groups.
for group_name in inventory:
if group_name != '_meta':
self.inventory.add_group(group_name)
# Then, create all hosts and add the host vars.
all_hosts = inventory['_meta']['hostvars']
for host_name, host_vars in six.iteritems(all_hosts):
self.inventory.add_host(host_name)
for var_name, var_value in six.iteritems(host_vars):
self.inventory.set_variable(host_name, var_name, var_value)
# Lastly, create to group-host and group-group relationships, and set group vars.
for group_name, group_content in six.iteritems(inventory):
if group_name != 'all' and group_name != '_meta':
# First add hosts to groups
for host_name in group_content.get('hosts', []):
self.inventory.add_host(host_name, group_name)
# Then add the parent-children group relationships.
for child_group_name in group_content.get('children', []):
self.inventory.add_child(group_name, child_group_name)
# Set the group vars. Note we should set group var for 'all', but not '_meta'.
if group_name != '_meta':
for var_name, var_value in six.iteritems(group_content.get('vars', {})):
self.inventory.set_variable(group_name, var_name, var_value)
# Fetch extra variables if told to do so
if self.get_option('include_metadata'):
config_data = module.get_endpoint('/api/v2/config/')['json']
server_data = {}
server_data['license_type'] = config_data.get('license_info', {}).get('license_type', 'unknown')
for key in ('version', 'ansible_version'):
server_data[key] = config_data.get(key, 'unknown')
self.inventory.set_variable('all', 'tower_metadata', server_data)
# Clean up the inventory.
self.inventory.reconcile_inventory()
| {
"content_hash": "06953d6c77a0caf89bdd7e03e7e1869a",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 117,
"avg_line_length": 44.1578947368421,
"alnum_prop": 0.6633558469076943,
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"id": "7dc4aaa1a3fa5271dcd0d6af34dfc80f79c12fc8",
"size": "7682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/github.com/ansible/awx/awx_collection/plugins/inventory/tower.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import logging
import re
from urllib.parse import parse_qsl, urlparse, urlunparse
from streamlink.plugin import Plugin, PluginError
from streamlink.plugin.api import useragents, validate
from streamlink.plugin.api.utils import itertags, parse_query
from streamlink.stream import HLSStream, HTTPStream
from streamlink.stream.ffmpegmux import MuxedStream
from streamlink.utils import parse_json, search_dict
log = logging.getLogger(__name__)
_config_schema = validate.Schema(
{
validate.optional("player_response"): validate.all(
validate.text,
validate.transform(parse_json),
{
validate.optional("streamingData"): {
validate.optional("hlsManifestUrl"): validate.text,
validate.optional("formats"): [{
"itag": int,
validate.optional("url"): validate.text,
validate.optional("cipher"): validate.text,
"qualityLabel": validate.text
}],
validate.optional("adaptiveFormats"): [{
"itag": int,
"mimeType": validate.all(
validate.text,
validate.transform(
lambda t:
[t.split(';')[0].split('/')[0], t.split(';')[1].split('=')[1].strip('"')]
),
[validate.text, validate.text],
),
validate.optional("url"): validate.url(scheme="http"),
validate.optional("cipher"): validate.text,
validate.optional("signatureCipher"): validate.text,
validate.optional("qualityLabel"): validate.text,
validate.optional("bitrate"): int
}]
},
validate.optional("videoDetails"): {
validate.optional("isLive"): validate.transform(bool),
validate.optional("author"): validate.text,
validate.optional("title"): validate.text
},
validate.optional("playabilityStatus"): {
validate.optional("status"): validate.text,
validate.optional("reason"): validate.text
},
},
),
"status": validate.text
}
)
_ytdata_re = re.compile(r'window\["ytInitialData"\]\s*=\s*({.*?});', re.DOTALL)
_url_re = re.compile(r"""(?x)https?://(?:\w+\.)?youtube\.com
(?:
(?:
/(?:
watch.+v=
|
embed/(?!live_stream)
|
v/
)(?P<video_id>[0-9A-z_-]{11})
)
|
(?:
/(?:
(?:user|c(?:hannel)?)/
|
embed/live_stream\?channel=
)[^/?&]+
)
|
(?:
/(?:c/)?[^/?]+/live/?$
)
)
""")
class YouTube(Plugin):
_oembed_url = "https://www.youtube.com/oembed"
_video_info_url = "https://youtube.com/get_video_info"
_oembed_schema = validate.Schema(
{
"author_name": validate.text,
"title": validate.text
}
)
# There are missing itags
adp_video = {
137: "1080p",
299: "1080p60", # HFR
264: "1440p",
308: "1440p60", # HFR
266: "2160p",
315: "2160p60", # HFR
138: "2160p",
302: "720p60", # HFR
135: "480p",
133: "240p",
160: "144p",
}
adp_audio = {
140: 128,
141: 256,
171: 128,
249: 48,
250: 64,
251: 160,
256: 256,
258: 258,
}
def __init__(self, url):
super().__init__(url)
parsed = urlparse(self.url)
if parsed.netloc == 'gaming.youtube.com':
self.url = urlunparse(parsed._replace(netloc='www.youtube.com'))
self.author = None
self.title = None
self.video_id = None
self.session.http.headers.update({'User-Agent': useragents.CHROME})
def get_author(self):
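        # accessing the get_oembed property triggers the oEmbed request and
        # populates self.author / self.title as a side effect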
if self.author is None:
self.get_oembed
return self.author
def get_title(self):
if self.title is None:
self.get_oembed
return self.title
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
@classmethod
def stream_weight(cls, stream):
match_3d = re.match(r"(\w+)_3d", stream)
match_hfr = re.match(r"(\d+p)(\d+)", stream)
if match_3d:
weight, group = Plugin.stream_weight(match_3d.group(1))
weight -= 1
group = "youtube_3d"
elif match_hfr:
weight, group = Plugin.stream_weight(match_hfr.group(1))
weight += 1
group = "high_frame_rate"
else:
weight, group = Plugin.stream_weight(stream)
return weight, group
@property
def get_oembed(self):
if self.video_id is None:
self.video_id = self._find_video_id(self.url)
params = {
"url": "https://www.youtube.com/watch?v={0}".format(self.video_id),
"format": "json"
}
res = self.session.http.get(self._oembed_url, params=params)
data = self.session.http.json(res, schema=self._oembed_schema)
self.author = data["author_name"]
self.title = data["title"]
def _create_adaptive_streams(self, info, streams):
adaptive_streams = {}
best_audio_itag = None
# Extract audio streams from the adaptive format list
streaming_data = info.get("player_response", {}).get("streamingData", {})
for stream_info in streaming_data.get("adaptiveFormats", []):
if "url" not in stream_info:
continue
stream_params = dict(parse_qsl(stream_info["url"]))
if "itag" not in stream_params:
continue
itag = int(stream_params["itag"])
# extract any high quality streams only available in adaptive formats
adaptive_streams[itag] = stream_info["url"]
stream_type, stream_format = stream_info["mimeType"]
if stream_type == "audio":
stream = HTTPStream(self.session, stream_info["url"])
name = "audio_{0}".format(stream_format)
streams[name] = stream
# find the best quality audio stream m4a, opus or vorbis
if best_audio_itag is None or self.adp_audio[itag] > self.adp_audio[best_audio_itag]:
best_audio_itag = itag
if best_audio_itag and adaptive_streams and MuxedStream.is_usable(self.session):
aurl = adaptive_streams[best_audio_itag]
for itag, name in self.adp_video.items():
if itag in adaptive_streams:
vurl = adaptive_streams[itag]
log.debug("MuxedStream: v {video} a {audio} = {name}".format(
audio=best_audio_itag,
name=name,
video=itag,
))
streams[name] = MuxedStream(self.session,
HTTPStream(self.session, vurl),
HTTPStream(self.session, aurl))
return streams
def _find_video_id(self, url):
m = _url_re.match(url)
if m.group("video_id"):
log.debug("Video ID from URL")
return m.group("video_id")
res = self.session.http.get(url)
datam = _ytdata_re.search(res.text)
if datam:
data = parse_json(datam.group(1))
            # find the videoRenderer object, where there is a LIVE NOW badge
for vid_ep in search_dict(data, 'currentVideoEndpoint'):
video_id = vid_ep.get("watchEndpoint", {}).get("videoId")
if video_id:
log.debug("Video ID from currentVideoEndpoint")
return video_id
for x in search_dict(data, 'videoRenderer'):
if x.get("viewCountText", {}).get("runs"):
if x.get("videoId"):
log.debug("Video ID from videoRenderer (live)")
return x["videoId"]
for bstyle in search_dict(x.get("badges", {}), "style"):
if bstyle == "BADGE_STYLE_TYPE_LIVE_NOW":
if x.get("videoId"):
log.debug("Video ID from videoRenderer (live)")
return x["videoId"]
if "/embed/live_stream" in url:
for link in itertags(res.text, "link"):
if link.attributes.get("rel") == "canonical":
canon_link = link.attributes.get("href")
if canon_link != url:
if canon_link.endswith("v=live_stream"):
log.debug("The video is not available")
break
else:
log.debug("Re-directing to canonical URL: {0}".format(canon_link))
return self._find_video_id(canon_link)
raise PluginError("Could not find a video on this page")
def _get_stream_info(self, video_id):
# normal
_params_1 = {"el": "detailpage"}
# age restricted
_params_2 = {"el": "embedded"}
# embedded restricted
_params_3 = {"eurl": "https://youtube.googleapis.com/v/{0}".format(video_id)}
count = 0
info_parsed = None
for _params in (_params_1, _params_2, _params_3):
count += 1
params = {"video_id": video_id}
params.update(_params)
res = self.session.http.get(self._video_info_url, params=params)
info_parsed = parse_query(res.text, name="config", schema=_config_schema)
player_response = info_parsed.get("player_response", {})
playability_status = player_response.get("playabilityStatus", {})
if (playability_status.get("status") != "OK"):
reason = playability_status.get("reason")
log.debug("get_video_info - {0}: {1}".format(
count, reason)
)
continue
self.author = player_response.get("videoDetails", {}).get("author")
self.title = player_response.get("videoDetails", {}).get("title")
log.debug("get_video_info - {0}: Found data".format(count))
break
return info_parsed
def _get_streams(self):
is_live = False
self.video_id = self._find_video_id(self.url)
log.debug(f"Using video ID: {self.video_id}")
info = self._get_stream_info(self.video_id)
if info and info.get("status") == "fail":
log.error("Could not get video info: {0}".format(info.get("reason")))
return
elif not info:
log.error("Could not get video info")
return
if info.get("player_response", {}).get("videoDetails", {}).get("isLive"):
log.debug("This video is live.")
is_live = True
streams = {}
protected = False
if (info.get("player_response", {}).get("streamingData", {}).get("adaptiveFormats", [{}])[0].get("cipher")
or info.get("player_response", {}).get("streamingData", {}).get("adaptiveFormats", [{}])[0].get("signatureCipher")
or info.get("player_response", {}).get("streamingData", {}).get("formats", [{}])[0].get("cipher")):
protected = True
log.debug("This video may be protected.")
for stream_info in info.get("player_response", {}).get("streamingData", {}).get("formats", []):
if "url" not in stream_info:
continue
stream = HTTPStream(self.session, stream_info["url"])
name = stream_info["qualityLabel"]
streams[name] = stream
if not is_live:
streams = self._create_adaptive_streams(info, streams)
hls_manifest = info.get("player_response", {}).get("streamingData", {}).get("hlsManifestUrl")
if hls_manifest:
try:
hls_streams = HLSStream.parse_variant_playlist(
self.session, hls_manifest, namekey="pixels"
)
streams.update(hls_streams)
except IOError as err:
log.warning(f"Failed to extract HLS streams: {err}")
if not streams and protected:
raise PluginError("This plugin does not support protected videos, "
"try youtube-dl instead")
return streams
__plugin__ = YouTube
| {
"content_hash": "6495a2cb1cb5ff1e22b007a7687178a8",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 125,
"avg_line_length": 37.01420454545455,
"alnum_prop": 0.5035689615473176,
"repo_name": "beardypig/streamlink",
"id": "ae06fe01774048b39c43ffe97456e5e3897714c1",
"size": "13029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/streamlink/plugins/youtube.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1538432"
},
{
"name": "Shell",
"bytes": "18707"
}
],
"symlink_target": ""
} |
"""
WSGI config for favor project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "favor.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "6f59c61ddfeb89732313573aa8d28fd2",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.5,
"alnum_prop": 0.7714285714285715,
"repo_name": "gianpaima/favorless",
"id": "b970fe9eff4839c9e0747cd54efb59276154e298",
"size": "385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "favor/favor/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "503414"
},
{
"name": "HTML",
"bytes": "59493"
},
{
"name": "JavaScript",
"bytes": "1253315"
},
{
"name": "Python",
"bytes": "68487"
}
],
"symlink_target": ""
} |
import wx
from wx.lib.mixins.listctrl import ListCtrlAutoWidthMixin
import os.path
from robotide.controller.filecontrollers import (ResourceFileController,
TestCaseFileController)
from robotide.pluginapi import (Plugin, ActionInfo, RideOpenSuite,
RideOpenResource, RideImportSetting, RideUserKeyword, RideNewProject)
from robotide.usages.UsageRunner import Usages
from robotide import utils
from robotide.widgets import (PopupMenuItem, ButtonWithHandler, Label, Font,
HtmlWindow, ImageProvider)
ALL_KEYWORDS = '<all keywords>'
ALL_USER_KEYWORDS = '<all user keywords>'
ALL_LIBRARY_KEYWORDS = '<all library keywords>'
class KeywordSearch(Plugin):
"""A plugin for searching keywords based on name or documentation."""
def __init__(self, app):
Plugin.__init__(self, app)
self.all_keywords = []
self._criteria = _SearchCriteria()
self.dirty = False
def enable(self):
action = ActionInfo('Tools', 'Search Keywords', self.OnSearch,
shortcut='F5',
doc='Search keywords from libraries and resources',
icon=ImageProvider().KW_SEARCH_ICON,
position=51)
self.register_action(action)
self.register_search_action('Search Keywords', self.show_search_for, ImageProvider().KW_SEARCH_ICON)
self.subscribe(self.mark_dirty, RideOpenSuite, RideOpenResource,
RideImportSetting, RideUserKeyword, RideNewProject)
self._dialog = KeywordSearchDialog(self.frame, self)
self.tree.register_context_menu_hook(self._search_resource)
def OnSearch(self, event):
self._dialog.show_search_with_criteria()
def mark_dirty(self, message):
self.dirty = True
def have_keywords_changed(self):
if not self.dirty:
return False
self._update()
return True
def _update(self):
self.dirty = False
self.all_keywords = self.model.get_all_keywords()
def search(self, pattern, search_docs, source_filter):
self._criteria = _SearchCriteria(pattern, search_docs, source_filter)
return self._search()
def _search(self):
return [ kw for kw in self.all_keywords if self._criteria.matches(kw) ]
def _search_resource(self, item):
if isinstance(item, (TestCaseFileController, ResourceFileController)):
callable = lambda x: self._show_resource(os.path.basename(item.source))
return [PopupMenuItem('Search Keywords', callable=callable)]
return []
def _show_resource(self, resource):
self._dialog.show_search_with_criteria(source=resource)
def show_search_for(self, pattern):
self._dialog.show_search_with_criteria(pattern=pattern)
def disable(self):
self.unregister_actions()
class _SearchCriteria(object):
def __init__(self, pattern='', search_docs=True, source_filter=ALL_KEYWORDS):
self._pattern = pattern
self._search_docs = search_docs
self._source_filter = source_filter
def matches(self, kw):
if not self._matches_source_filter(kw):
return False
if self._contains(kw.name, self._pattern):
return True
return self._search_docs and self._contains(kw.doc, self._pattern)
def _matches_source_filter(self, kw):
if self._source_filter == ALL_KEYWORDS:
return True
if self._source_filter == ALL_USER_KEYWORDS and kw.is_user_keyword():
return True
if self._source_filter == ALL_LIBRARY_KEYWORDS and kw.is_library_keyword():
return True
return self._source_filter == kw.source
def _contains(self, string, pattern):
return utils.normalize(pattern) in utils.normalize(string)
class KeywordSearchDialog(wx.Frame):
def __init__(self, parent, searcher):
wx.Frame.__init__(self, parent, title="Search Keywords", style=wx.DEFAULT_FRAME_STYLE|wx.FRAME_FLOAT_ON_PARENT)
self._plugin = searcher
self._create_components()
self._make_bindings()
self._sort_order = _SortOrder()
self._last_selected_kw = None
self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DFACE))
self.CenterOnParent()
def _create_components(self):
self.SetSizer(wx.BoxSizer(wx.VERTICAL))
self._add_search_control()
self._add_keyword_list()
self._add_keyword_details()
self.SetSize((700,500))
def _add_search_control(self):
line1 = self._horizontal_sizer()
self._add_pattern_filter(line1)
self._add_doc_filter(line1)
self.Sizer.Add(line1, 0, wx.ALL, 3)
line2 = self._horizontal_sizer()
self._add_source_filter(line2)
self.Sizer.Add(line2, 0, wx.ALL, 3)
def _horizontal_sizer(self):
return wx.BoxSizer(wx.HORIZONTAL)
def _add_pattern_filter(self, sizer):
sizer.Add(Label(self, label='Search term: '))
self._search_control = wx.SearchCtrl(self, size=(200,-1),
style=wx.TE_PROCESS_ENTER)
sizer.Add(self._search_control)
def _add_doc_filter(self, sizer):
self._use_doc = wx.CheckBox(self, label='Search documentation')
self._use_doc.SetValue(True)
sizer.Add(self._use_doc)
def _add_source_filter(self, sizer):
sizer.Add(Label(self, label='Source: '))
self._source_filter = wx.ComboBox(self, value=ALL_KEYWORDS, size=(300, -1),
choices=self._get_sources(), style=wx.CB_READONLY)
sizer.Add(self._source_filter)
def _get_sources(self):
sources = []
for kw in self._plugin.all_keywords:
if kw.source not in sources:
sources.append(kw.source)
return [ALL_KEYWORDS, ALL_USER_KEYWORDS, ALL_LIBRARY_KEYWORDS] + sorted(sources)
def _add_keyword_list(self):
self._list = _KeywordList(self, self._plugin)
self._list.SetSize(self.Size)
self._add_to_sizer(self._list)
def _add_keyword_details(self):
self._details = HtmlWindow(self)
self._add_to_sizer(self._details)
self._find_usages_button = ButtonWithHandler(self, 'Find Usages')
self.Sizer.Add(self._find_usages_button, 0, wx.ALL, 3)
def _add_to_sizer(self, component):
self.Sizer.Add(component, 1, wx.EXPAND | wx.ALL, 3)
def OnFindUsages(self, event):
Usages(self._plugin.model, self._plugin.tree.highlight, self._last_selected_kw.name, self._last_selected_kw).show()
def _make_bindings(self):
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected, self._list)
self.Bind(wx.EVT_SEARCHCTRL_SEARCH_BTN, self.OnSearch,
self._search_control)
self.Bind(wx.EVT_TEXT_ENTER, self.OnSearch, self._search_control)
self.Bind(wx.EVT_ACTIVATE, self.OnActivate)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.Bind(wx.EVT_CHECKBOX, self.OnUseDocChange, self._use_doc)
self.Bind(wx.EVT_COMBOBOX, self.OnSourceFilterChange, self._source_filter)
self.Bind(wx.EVT_LIST_COL_CLICK, self.OnColClick)
def OnColClick(self,event):
col = event.GetColumn()
if self._sort_order.is_sortable_column(col):
self._sort_order.sort(col)
self._populate_search()
event.Skip()
def OnActivate(self, event):
if self._plugin.have_keywords_changed():
self._update_sources()
self._populate_search()
def OnUseDocChange(self, event):
self._populate_search()
def OnSearch(self, event):
self._sort_order.searched(self._get_search_text())
self._populate_search()
def OnSourceFilterChange(self, event):
self._populate_search()
def OnKey(self, event):
# Needed for HtmlWindow callback
pass
def OnItemSelected(self, event):
self._last_selected_kw = self._keywords[event.Index]
self._update_details()
def _update_sources(self):
selection = self._source_filter.GetValue()
self._source_filter.Clear()
for source in self._get_sources():
self._source_filter.Append(source)
self._source_filter.SetValue(selection)
if self._source_filter.GetValue() != selection:
self._source_filter.SetValue(ALL_KEYWORDS)
def OnClose(self, event):
self.Hide()
def _populate_search(self):
self._keywords = _KeywordData(self._plugin.search(*self._get_search_criteria()),
self._sort_order, self._get_search_text())
self._update_keyword_selection()
self._list.show_keywords(self._keywords, self._last_selected_kw)
self.Refresh()
def _get_search_criteria(self):
return self._get_search_text(), self._use_doc.GetValue(), self._source_filter.GetValue()
def _get_search_text(self):
return self._search_control.GetValue().lower()
def _update_keyword_selection(self):
        if self._keywords and self._last_selected_kw not in self._keywords:
self._last_selected_kw = self._keywords[0]
self._update_details()
def _update_details(self):
if self._last_selected_kw in self._keywords:
self._details.SetPage(self._last_selected_kw.details)
self._find_usages_button.Enable()
else:
self._details.clear()
self._find_usages_button.Disable()
def show_search_with_criteria(self, pattern='', search_docs=True, source=ALL_KEYWORDS):
self._update_widgets(pattern, search_docs, source)
self._populate_search()
self._show()
self._search_control.SetFocus()
def _update_widgets(self, pattern, search_docs, source):
self._search_control.SetValue(pattern)
self._use_doc.SetValue(search_docs)
self._source_filter.SetValue(source)
def _show(self):
if not self.IsShown():
self.Show()
self.Raise()
class _SortOrder(object):
def __init__(self):
self.sort_up = True
self.column = 0
self.default_order = False
def searched(self, term):
self.__init__()
if term:
self.default_order = True
def swap_direction(self):
self.sort_up = not self.sort_up
def is_sortable_column(self, col):
return col < 2
def sort(self, col):
if self._has_been_sorted_by(col):
self.swap_direction()
else:
self.sort_up = True
self.column = col
self.default_order = False
def _has_been_sorted_by(self, col):
return self.column == col and not self.default_order
class _KeywordData(list):
headers = ['Name', 'Source', 'Description']
def __init__(self, keywords, sort_order, search_criteria=None):
self.extend(self._sort(keywords, sort_order, search_criteria))
def _sort(self, keywords, sort_order, search_criteria=None):
if sort_order.default_order:
return self._sort_by_search(keywords, sort_order, search_criteria)
return self._sort_by_attr(keywords, sort_order)
def _sort_by_search(self, keywords, sort_order, search_criteria):
search_criteria = search_criteria.lower()
starts_with = [kw for kw in keywords if kw.name.lower().startswith(search_criteria)]
name_contains = [kw for kw in keywords if (search_criteria in kw.name.lower()
and kw not in starts_with)]
doc_contains = [kw for kw in keywords if (search_criteria in kw.details.lower()
and kw not in starts_with
and kw not in name_contains)]
result = []
for to_sort in (starts_with, name_contains, doc_contains):
result.extend(self._sort_by_attr(to_sort, sort_order))
return result
def _sort_by_attr(self, keywords, sort_order):
return sorted(keywords, cmp=self._get_comparator_for(self.headers[sort_order.column].lower()),
reverse=not sort_order.sort_up)
    def _get_comparator_for(self, attr_name):
        return lambda kw, kw2: cmp(self._value_lowerer(kw, attr_name),
                                   self._value_lowerer(kw2, attr_name))
def _value_lowerer(self, kw, attr_name):
return getattr(kw, attr_name).lower()
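# --- Editor's note (illustrative, not part of the original file) ---
# With a search term such as "log", _sort_by_search above buckets the
# results by relevance, e.g.:
#   "Log Message"  - name starts with the term
#   "Reset Log"    - name merely contains the term
#   "Comment"      - only the documentation mentions the term
# _sort_by_attr then orders keywords inside each bucket, so ties fall
# back to the currently selected column sort.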
class _KeywordList(wx.ListCtrl, ListCtrlAutoWidthMixin):
def __init__(self, parent, plugin):
style = wx.LC_REPORT|wx.NO_BORDER|wx.LC_SINGLE_SEL|wx.LC_HRULES|wx.LC_VIRTUAL
wx.ListCtrl.__init__(self, parent, style=style)
ListCtrlAutoWidthMixin.__init__(self)
self._plugin = plugin
self._create_headers()
self._link_attribute = self._create_link_attribute()
self._image_list = self._create_image_list()
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
def _create_headers(self):
for col, title in enumerate(_KeywordData.headers):
self.InsertColumn(col, title)
self.SetColumnWidth(0, 250)
def _create_link_attribute(self):
attr = wx.ListItemAttr()
attr.SetTextColour(wx.BLUE)
attr.SetFont(Font().underlined)
return attr
def _create_image_list(self):
imglist = wx.ImageList(16, 16)
imglist.Add(wx.ArtProvider_GetBitmap(wx.ART_GO_UP, wx.ART_OTHER, (16, 16)))
self.SetImageList(imglist, wx.IMAGE_LIST_SMALL)
return imglist
def show_keywords(self, keywords, kw_selection):
self._keywords = keywords
self.SetItemCount(len(self._keywords))
if keywords:
index = self._keywords.index(kw_selection)
self.Select(index)
self.Focus(index)
def OnLeftUp(self, event):
item, flags = self.HitTest(event.Position)
if item == wx.NOT_FOUND:
return
kw = self._keywords[item]
if kw.is_user_keyword() and (flags & wx.LIST_HITTEST_ONITEMICON):
self._plugin.select_user_keyword_node(kw.item)
def OnGetItemText(self, row, col):
kw = self._keywords[row]
return [kw.name, kw.source, kw.shortdoc][col]
def OnGetItemImage(self, item):
if self._keywords[item].is_user_keyword():
return 0 # index in self._image_list
return -1 # No image
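# --- Editor's note (illustrative, not part of the original file) ---
# Because the control is created with wx.LC_VIRTUAL, wx stores no rows
# itself: it calls the OnGetItemText/OnGetItemImage overrides above on
# demand. That is why show_keywords() only has to update
# self._keywords and call SetItemCount() instead of inserting items
# one by one.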
| {
"content_hash": "60cce38ca1397715c4ff308a8565ab5d",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 124,
"avg_line_length": 37.1530612244898,
"alnum_prop": 0.6160395495742927,
"repo_name": "fingeronthebutton/RIDE",
"id": "69d0951d87ede07be2744cebdaee3681f4722ad8",
"size": "15172",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/robotide/ui/keywordsearch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21370"
},
{
"name": "HTML",
"bytes": "110675"
},
{
"name": "JavaScript",
"bytes": "41401"
},
{
"name": "Python",
"bytes": "2902622"
}
],
"symlink_target": ""
} |
from .rules import Rules
import json
from urlparse import urlparse, parse_qs
class Referrer:
class Types:
INDIRECT = 'indirect'
DIRECT = 'direct'
SEARCH = 'search'
SOCIAL = 'social'
EMAIL = 'email'
rules = Rules()
@staticmethod
def parse_direct(raw_url, direct_domains, url=None):
if not url:
url = urlparse(raw_url)
domain = url.netloc
if domain not in direct_domains:
return None
return {
'type': Referrer.Types.DIRECT,
'url': raw_url,
'domain': domain,
}
@staticmethod
def parse_email(raw_url, url=None):
if not url:
url = urlparse(raw_url)
domain = url.netloc
if domain not in Referrer.rules.email:
return None
rule = Referrer.rules.email[domain]
return {
'type': Referrer.Types.EMAIL,
'url': raw_url,
'domain': rule['domain'],
'label': rule['label'],
}
@staticmethod
def parse_social(raw_url, url=None):
if not url:
url = urlparse(raw_url)
domain = url.netloc
if domain not in Referrer.rules.social:
return None
rule = Referrer.rules.social[domain]
return {
'type': Referrer.Types.SOCIAL,
'url': raw_url,
'domain': rule['domain'],
'label': rule['label'],
}
@staticmethod
def parse_search_fuzzy(raw_url, url=None):
if not url:
url = urlparse(raw_url)
domain = url.netloc
host_parts = domain.split('.')
for host_part in host_parts:
if host_part not in Referrer.rules.search_fuzzy:
continue
rule = Referrer.rules.search_fuzzy[host_part]
query_params = parse_qs(url.query, keep_blank_values=True)
query_common = set.intersection(set(query_params.keys()), set(rule['parameters']))
fragment_params = parse_qs(url.fragment, keep_blank_values=True)
fragment_common = set.intersection(set(fragment_params.keys()), set(rule['parameters']))
query = None
if len(query_common) > 0:
query = query_params[list(query_common)[0]][0]
elif len(fragment_common) > 0:
query = fragment_params[list(fragment_common)[0]][0]
elif '*' in rule['parameters']:
query = ''
if query is not None:
return {
'type': Referrer.Types.SEARCH,
'url': raw_url,
'domain': domain,
'label': rule['label'],
'query': query,
}
return None
@staticmethod
def parse_search(raw_url, url=None):
if not url:
url = urlparse(raw_url)
domain = url.netloc
if domain not in Referrer.rules.search:
return Referrer.parse_search_fuzzy(raw_url, url=url)
rule = Referrer.rules.search[domain]
query_params = parse_qs(url.query, keep_blank_values=True)
query_common = set.intersection(set(query_params.keys()), set(rule['parameters']))
fragment_params = parse_qs(url.fragment, keep_blank_values=True)
fragment_common = set.intersection(set(fragment_params.keys()), set(rule['parameters']))
query = ''
if len(query_common) > 0:
query = query_params[list(query_common)[0]][0]
elif len(fragment_common) > 0:
query = fragment_params[list(fragment_common)[0]][0]
elif '*' in rule['parameters']:
query = ''
else:
return Referrer.parse_search_fuzzy(raw_url, url=url)
return {
'type': Referrer.Types.SEARCH,
'url': raw_url,
'domain': rule['domain'],
'label': rule['label'],
'query': query,
}
@staticmethod
def parse_indirect(raw_url, url=None):
if not url:
url = urlparse(raw_url)
return {
'type': Referrer.Types.INDIRECT,
'url': raw_url,
'domain': url.netloc,
}
@staticmethod
def parse(raw_url, direct_domains=[]):
url = urlparse(raw_url)
        referrer = Referrer.parse_direct(raw_url, direct_domains, url=url)
if referrer:
return referrer
referrer = Referrer.parse_email(raw_url, url=url)
if referrer:
return referrer
referrer = Referrer.parse_social(raw_url, url=url)
if referrer:
return referrer
referrer = Referrer.parse_search(raw_url, url=url)
if referrer:
return referrer
return Referrer.parse_indirect(raw_url, url=url)
def parse(raw_url, direct_domains=[]):
    return Referrer.parse(raw_url, direct_domains)
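# --- Editor's usage sketch (not part of the original module) ---
# A minimal illustration of the cascade above (direct -> email ->
# social -> search -> indirect), assuming the bundled rules data
# classifies www.google.com as a search engine with a "q" parameter.
if __name__ == '__main__':
    ref = Referrer.parse('http://www.google.com/search?q=pyreferrer')
    print ref['type']        # expected: 'search'
    print ref.get('query')   # expected: 'pyreferrer'
    ref = Referrer.parse('http://example.com/page',
                         direct_domains=['example.com'])
    print ref['type']        # expected: 'direct'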
| {
"content_hash": "9241ef5d4abe11acd0b9e24ab324232a",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 94,
"avg_line_length": 28.355263157894736,
"alnum_prop": 0.6176334106728538,
"repo_name": "snormore/pyreferrer",
"id": "24d8f314d3286ef48f132e435232f7caffaa5741",
"size": "4310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyreferrer/referrer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17029"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
from django.template import loader
from django.http import JsonResponse
from django.core import serializers
from django.shortcuts import redirect
import json
import sys
import OmniDB_app.include.Spartacus as Spartacus
import OmniDB_app.include.Spartacus.Database as Database
import OmniDB_app.include.Spartacus.Utils as Utils
import OmniDB_app.include.OmniDatabase as OmniDatabase
from OmniDB_app.include.Session import Session
from OmniDB import settings, custom_settings
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout as logout_django
from OmniDB_app.models.main import *
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from OmniDB_app.views.memory_objects import *
import logging
logger = logging.getLogger(__name__)
@login_required
def check_session(request):
# User is authenticated, check if user details object exists.
try:
user_details = UserDetails.objects.get(user=request.user)
# User details does not exist, create it.
except Exception:
user_details = UserDetails(user=request.user)
user_details.save()
#Invalid session
if not request.session.get('omnidb_session'):
#creating session key to use it
request.session.save()
v_session = Session(
request.user.id,
request.user.username,
'light',
user_details.font_size,
request.user.is_superuser,
request.session.session_key,
user_details.csv_encoding,
user_details.csv_delimiter
)
request.session['omnidb_session'] = v_session
return redirect(settings.PATH + '/workspace')
def index(request):
context = {
'omnidb_short_version': settings.OMNIDB_SHORT_VERSION,
'url_folder': settings.PATH,
'csrf_cookie_name': settings.CSRF_COOKIE_NAME
}
user = request.GET.get('user', '')
pwd = request.GET.get('pwd', '')
if user and pwd:
num_connections = sign_in_automatic(request,user,pwd)
if num_connections >= 0:
return redirect('/')
else:
return HttpResponse("INVALID APP TOKEN")
template = loader.get_template('OmniDB_app/login.html')
return HttpResponse(template.render(context, request))
@user_authenticated
def logout(request):
v_session = request.session.get('omnidb_session')
logger.info('User "{0}" logged out.'.format(v_session.v_user_name))
logout_django(request)
return redirect(settings.PATH + '/omnidb_login')
def check_session_message(request):
v_return = {}
v_return['v_data'] = ''
v_return['v_error'] = False
v_return['v_error_id'] = -1
if request.session.get('omnidb_alert_message'):
v_return['v_data'] = request.session.get('omnidb_alert_message')
request.session['omnidb_alert_message'] = ''
return JsonResponse(v_return)
def sign_in_automatic(request, username, pwd):
token = request.GET.get('token', '')
valid_token = custom_settings.APP_TOKEN
if valid_token and token != valid_token:
return -1
user = authenticate(username=username, password=pwd)
if user is not None:
login(request, user)
else:
return -1
logger.info('User "{0}" logged in.'.format(username))
return 0
def create_user_session(request, user, user_details):
#creating session key to use it
request.session.save()
v_session = Session(
user.id,
user.username,
'light',
user_details.font_size,
        user.is_superuser,
request.session.session_key,
user_details.csv_encoding,
user_details.csv_delimiter
)
request.session['omnidb_session'] = v_session
def sign_in(request):
v_return = {}
v_return['v_data'] = -1
v_return['v_error'] = False
v_return['v_error_id'] = -1
valid_token = custom_settings.APP_TOKEN
if valid_token:
v_return['v_data'] = -2
return JsonResponse(v_return)
json_object = json.loads(request.POST.get('data', None))
username = json_object['p_username']
pwd = json_object['p_pwd']
user = authenticate(username=username, password=pwd)
if user is not None:
login(request, user)
else:
return JsonResponse(v_return)
logger.info('User "{0}" logged in.'.format(username))
v_return['v_data'] = 0
return JsonResponse(v_return)
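# --- Editor's note (illustrative, not part of the original file) ---
# sign_in() above expects a POST body of the form
#   data={"p_username": "<user>", "p_pwd": "<password>"}
# and answers with v_data -2 (an APP_TOKEN is configured, so the
# ?user=&pwd=&token= flow handled by index() must be used instead),
# -1 (bad credentials) or 0 (logged in).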
| {
"content_hash": "5519f1078cbd4872766c67cb34c170b2",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 72,
"avg_line_length": 27.39156626506024,
"alnum_prop": 0.6624147789751484,
"repo_name": "OmniDB/OmniDB",
"id": "955bb385074c8b4f8c44f0b451da23f555991a6e",
"size": "4547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OmniDB/OmniDB_app/views/login.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "19630"
},
{
"name": "C++",
"bytes": "302"
},
{
"name": "CSS",
"bytes": "304604"
},
{
"name": "Dockerfile",
"bytes": "13652"
},
{
"name": "HTML",
"bytes": "95804"
},
{
"name": "JavaScript",
"bytes": "20832908"
},
{
"name": "Makefile",
"bytes": "264"
},
{
"name": "PLpgSQL",
"bytes": "6153"
},
{
"name": "Python",
"bytes": "2766750"
},
{
"name": "Ruby",
"bytes": "25824"
},
{
"name": "SQLPL",
"bytes": "88625"
},
{
"name": "Shell",
"bytes": "59204"
},
{
"name": "TSQL",
"bytes": "88280"
}
],
"symlink_target": ""
} |
import json
import math
import re
from decimal import Decimal
from django.contrib.gis.db.models import functions
from django.contrib.gis.geos import (
GEOSGeometry, LineString, Point, Polygon, fromstr,
)
from django.contrib.gis.measure import Area
from django.db import connection
from django.db.models import Sum
from django.test import TestCase, skipUnlessDBFeature
from ..utils import mysql, oracle, postgis, spatialite
from .models import City, Country, CountryWebMercator, State, Track
@skipUnlessDBFeature("gis_enabled")
class GISFunctionsTests(TestCase):
"""
Testing functions from django/contrib/gis/db/models/functions.py.
Area/Distance/Length/Perimeter are tested in distapp/tests.
Please keep the tests in function's alphabetic order.
"""
fixtures = ['initial']
def test_asgeojson(self):
# Only PostGIS and SpatiaLite support GeoJSON.
if not connection.features.has_AsGeoJSON_function:
with self.assertRaises(NotImplementedError):
list(Country.objects.annotate(json=functions.AsGeoJSON('mpoly')))
return
pueblo_json = '{"type":"Point","coordinates":[-104.609252,38.255001]}'
houston_json = (
'{"type":"Point","crs":{"type":"name","properties":'
'{"name":"EPSG:4326"}},"coordinates":[-95.363151,29.763374]}'
)
victoria_json = (
'{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],'
'"coordinates":[-123.305196,48.462611]}'
)
chicago_json = (
'{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},'
'"bbox":[-87.65018,41.85039,-87.65018,41.85039],"coordinates":[-87.65018,41.85039]}'
)
# MySQL ignores the crs option.
if mysql:
houston_json = json.loads(houston_json)
del houston_json['crs']
chicago_json = json.loads(chicago_json)
del chicago_json['crs']
# Precision argument should only be an integer
with self.assertRaises(TypeError):
City.objects.annotate(geojson=functions.AsGeoJSON('point', precision='foo'))
# Reference queries and values.
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 0)
# FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Pueblo';
self.assertJSONEqual(
pueblo_json,
City.objects.annotate(geojson=functions.AsGeoJSON('point')).get(name='Pueblo').geojson
)
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Houston';
# This time we want to include the CRS by using the `crs` keyword.
self.assertJSONEqual(
City.objects.annotate(json=functions.AsGeoJSON('point', crs=True)).get(name='Houston').json,
houston_json,
)
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Houston';
# This time we include the bounding box by using the `bbox` keyword.
self.assertJSONEqual(
victoria_json,
City.objects.annotate(
geojson=functions.AsGeoJSON('point', bbox=True)
).get(name='Victoria').geojson
)
# SELECT ST_AsGeoJson("geoapp_city"."point", 5, 3) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Chicago';
# Finally, we set every available keyword.
self.assertJSONEqual(
City.objects.annotate(
geojson=functions.AsGeoJSON('point', bbox=True, crs=True, precision=5)
).get(name='Chicago').geojson,
chicago_json,
)
@skipUnlessDBFeature("has_AsGML_function")
def test_asgml(self):
# Should throw a TypeError when trying to obtain GML from a
# non-geometry field.
qs = City.objects.all()
with self.assertRaises(TypeError):
qs.annotate(gml=functions.AsGML('name'))
ptown = City.objects.annotate(gml=functions.AsGML('point', precision=9)).get(name='Pueblo')
if oracle:
# No precision parameter for Oracle :-/
gml_regex = re.compile(
r'^<gml:Point srsName="EPSG:4326" xmlns:gml="http://www.opengis.net/gml">'
r'<gml:coordinates decimal="\." cs="," ts=" ">-104.60925\d+,38.25500\d+ '
r'</gml:coordinates></gml:Point>'
)
else:
gml_regex = re.compile(
r'^<gml:Point srsName="EPSG:4326"><gml:coordinates>'
r'-104\.60925\d+,38\.255001</gml:coordinates></gml:Point>'
)
self.assertTrue(gml_regex.match(ptown.gml))
self.assertIn(
'<gml:pos srsDimension="2">',
City.objects.annotate(gml=functions.AsGML('point', version=3)).get(name='Pueblo').gml
)
@skipUnlessDBFeature("has_AsKML_function")
def test_askml(self):
# Should throw a TypeError when trying to obtain KML from a
# non-geometry field.
with self.assertRaises(TypeError):
City.objects.annotate(kml=functions.AsKML('name'))
# Ensuring the KML is as expected.
qs = City.objects.annotate(kml=functions.AsKML('point', precision=9))
ptown = qs.get(name='Pueblo')
self.assertEqual('<Point><coordinates>-104.609252,38.255001</coordinates></Point>', ptown.kml)
# Same result if the queryset is evaluated again.
self.assertEqual(qs.get(name='Pueblo').kml, ptown.kml)
@skipUnlessDBFeature("has_AsSVG_function")
def test_assvg(self):
with self.assertRaises(TypeError):
City.objects.annotate(svg=functions.AsSVG('point', precision='foo'))
# SELECT AsSVG(geoapp_city.point, 0, 8) FROM geoapp_city WHERE name = 'Pueblo';
svg1 = 'cx="-104.609252" cy="-38.255001"'
# Even though relative, only one point so it's practically the same except for
# the 'c' letter prefix on the x,y values.
svg2 = svg1.replace('c', '')
self.assertEqual(svg1, City.objects.annotate(svg=functions.AsSVG('point')).get(name='Pueblo').svg)
self.assertEqual(svg2, City.objects.annotate(svg=functions.AsSVG('point', relative=5)).get(name='Pueblo').svg)
@skipUnlessDBFeature("has_Azimuth_function")
def test_azimuth(self):
# Returns the azimuth in radians.
azimuth_expr = functions.Azimuth(Point(0, 0, srid=4326), Point(1, 1, srid=4326))
self.assertAlmostEqual(City.objects.annotate(azimuth=azimuth_expr).first().azimuth, math.pi / 4)
# Returns None if the two points are coincident.
azimuth_expr = functions.Azimuth(Point(0, 0, srid=4326), Point(0, 0, srid=4326))
self.assertIsNone(City.objects.annotate(azimuth=azimuth_expr).first().azimuth)
@skipUnlessDBFeature("has_BoundingCircle_function")
def test_bounding_circle(self):
def circle_num_points(num_seg):
# num_seg is the number of segments per quarter circle.
return (4 * num_seg) + 1
expected_areas = (169, 136) if postgis else (171, 126)
qs = Country.objects.annotate(circle=functions.BoundingCircle('mpoly')).order_by('name')
self.assertAlmostEqual(qs[0].circle.area, expected_areas[0], 0)
self.assertAlmostEqual(qs[1].circle.area, expected_areas[1], 0)
if postgis:
# By default num_seg=48.
self.assertEqual(qs[0].circle.num_points, circle_num_points(48))
self.assertEqual(qs[1].circle.num_points, circle_num_points(48))
qs = Country.objects.annotate(circle=functions.BoundingCircle('mpoly', num_seg=12)).order_by('name')
if postgis:
self.assertGreater(qs[0].circle.area, 168.4, 0)
self.assertLess(qs[0].circle.area, 169.5, 0)
self.assertAlmostEqual(qs[1].circle.area, 136, 0)
self.assertEqual(qs[0].circle.num_points, circle_num_points(12))
self.assertEqual(qs[1].circle.num_points, circle_num_points(12))
else:
self.assertAlmostEqual(qs[0].circle.area, expected_areas[0], 0)
self.assertAlmostEqual(qs[1].circle.area, expected_areas[1], 0)
@skipUnlessDBFeature("has_Centroid_function")
def test_centroid(self):
qs = State.objects.exclude(poly__isnull=True).annotate(centroid=functions.Centroid('poly'))
tol = 1.8 if mysql else (0.1 if oracle else 0.00001)
for state in qs:
self.assertTrue(state.poly.centroid.equals_exact(state.centroid, tol))
with self.assertRaisesMessage(TypeError, "'Centroid' takes exactly 1 argument (2 given)"):
State.objects.annotate(centroid=functions.Centroid('poly', 'poly'))
@skipUnlessDBFeature("has_Difference_function")
def test_difference(self):
geom = Point(5, 23, srid=4326)
qs = Country.objects.annotate(diff=functions.Difference('mpoly', geom))
# Oracle does something screwy with the Texas geometry.
if oracle:
qs = qs.exclude(name='Texas')
for c in qs:
self.assertTrue(c.mpoly.difference(geom).equals(c.diff))
@skipUnlessDBFeature("has_Difference_function", "has_Transform_function")
def test_difference_mixed_srid(self):
"""Testing with mixed SRID (Country has default 4326)."""
geom = Point(556597.4, 2632018.6, srid=3857) # Spherical mercator
qs = Country.objects.annotate(difference=functions.Difference('mpoly', geom))
# Oracle does something screwy with the Texas geometry.
if oracle:
qs = qs.exclude(name='Texas')
for c in qs:
self.assertTrue(c.mpoly.difference(geom).equals(c.difference))
@skipUnlessDBFeature("has_Envelope_function")
def test_envelope(self):
countries = Country.objects.annotate(envelope=functions.Envelope('mpoly'))
for country in countries:
self.assertIsInstance(country.envelope, Polygon)
@skipUnlessDBFeature("has_ForceRHR_function")
def test_force_rhr(self):
rings = (
((0, 0), (5, 0), (0, 5), (0, 0)),
((1, 1), (1, 3), (3, 1), (1, 1)),
)
rhr_rings = (
((0, 0), (0, 5), (5, 0), (0, 0)),
((1, 1), (3, 1), (1, 3), (1, 1)),
)
State.objects.create(name='Foo', poly=Polygon(*rings))
st = State.objects.annotate(force_rhr=functions.ForceRHR('poly')).get(name='Foo')
self.assertEqual(rhr_rings, st.force_rhr.coords)
@skipUnlessDBFeature("has_GeoHash_function")
def test_geohash(self):
# Reference query:
# SELECT ST_GeoHash(point) FROM geoapp_city WHERE name='Houston';
# SELECT ST_GeoHash(point, 5) FROM geoapp_city WHERE name='Houston';
ref_hash = '9vk1mfq8jx0c8e0386z6'
h1 = City.objects.annotate(geohash=functions.GeoHash('point')).get(name='Houston')
h2 = City.objects.annotate(geohash=functions.GeoHash('point', precision=5)).get(name='Houston')
self.assertEqual(ref_hash, h1.geohash[:len(ref_hash)])
self.assertEqual(ref_hash[:5], h2.geohash)
@skipUnlessDBFeature("has_Intersection_function")
def test_intersection(self):
geom = Point(5, 23, srid=4326)
qs = Country.objects.annotate(inter=functions.Intersection('mpoly', geom))
for c in qs:
if spatialite or (mysql and not connection.ops.uses_invalid_empty_geometry_collection) or oracle:
# When the intersection is empty, some databases return None.
expected = None
else:
expected = c.mpoly.intersection(geom)
self.assertEqual(c.inter, expected)
@skipUnlessDBFeature("has_IsValid_function")
def test_isvalid(self):
valid_geom = fromstr('POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))')
invalid_geom = fromstr('POLYGON((0 0, 0 1, 1 1, 1 0, 1 1, 1 0, 0 0))')
State.objects.create(name='valid', poly=valid_geom)
State.objects.create(name='invalid', poly=invalid_geom)
valid = State.objects.filter(name='valid').annotate(isvalid=functions.IsValid('poly')).first()
invalid = State.objects.filter(name='invalid').annotate(isvalid=functions.IsValid('poly')).first()
self.assertIs(valid.isvalid, True)
self.assertIs(invalid.isvalid, False)
@skipUnlessDBFeature("has_Area_function")
def test_area_with_regular_aggregate(self):
# Create projected country objects, for this test to work on all backends.
for c in Country.objects.all():
CountryWebMercator.objects.create(name=c.name, mpoly=c.mpoly)
# Test in projected coordinate system
qs = CountryWebMercator.objects.annotate(area_sum=Sum(functions.Area('mpoly')))
# Some backends (e.g. Oracle) cannot group by multipolygon values, so
# defer such fields in the aggregation query.
for c in qs.defer('mpoly'):
result = c.area_sum
# If the result is a measure object, get value.
if isinstance(result, Area):
result = result.sq_m
self.assertAlmostEqual((result - c.mpoly.area) / c.mpoly.area, 0)
@skipUnlessDBFeature("has_Area_function")
def test_area_lookups(self):
# Create projected countries so the test works on all backends.
CountryWebMercator.objects.bulk_create(
CountryWebMercator(name=c.name, mpoly=c.mpoly.transform(3857, clone=True))
for c in Country.objects.all()
)
qs = CountryWebMercator.objects.annotate(area=functions.Area('mpoly'))
self.assertEqual(qs.get(area__lt=Area(sq_km=500000)), CountryWebMercator.objects.get(name='New Zealand'))
with self.assertRaisesMessage(ValueError, 'AreaField only accepts Area measurement objects.'):
qs.get(area__lt=500000)
@skipUnlessDBFeature("has_LineLocatePoint_function")
def test_line_locate_point(self):
pos_expr = functions.LineLocatePoint(LineString((0, 0), (0, 3), srid=4326), Point(0, 1, srid=4326))
self.assertAlmostEqual(State.objects.annotate(pos=pos_expr).first().pos, 0.3333333)
@skipUnlessDBFeature("has_MakeValid_function")
def test_make_valid(self):
invalid_geom = fromstr('POLYGON((0 0, 0 1, 1 1, 1 0, 1 1, 1 0, 0 0))')
State.objects.create(name='invalid', poly=invalid_geom)
invalid = State.objects.filter(name='invalid').annotate(repaired=functions.MakeValid('poly')).first()
self.assertIs(invalid.repaired.valid, True)
self.assertEqual(invalid.repaired, fromstr('POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))', srid=invalid.poly.srid))
@skipUnlessDBFeature("has_MemSize_function")
def test_memsize(self):
ptown = City.objects.annotate(size=functions.MemSize('point')).get(name='Pueblo')
self.assertTrue(20 <= ptown.size <= 40) # Exact value may depend on PostGIS version
@skipUnlessDBFeature("has_NumGeom_function")
def test_num_geom(self):
# Both 'countries' only have two geometries.
for c in Country.objects.annotate(num_geom=functions.NumGeometries('mpoly')):
self.assertEqual(2, c.num_geom)
qs = City.objects.filter(point__isnull=False).annotate(num_geom=functions.NumGeometries('point'))
for city in qs:
# Oracle and PostGIS return 1 for the number of geometries on
# non-collections, whereas MySQL returns None.
if mysql:
self.assertIsNone(city.num_geom)
else:
self.assertEqual(1, city.num_geom)
@skipUnlessDBFeature("has_NumPoint_function")
def test_num_points(self):
coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)]
Track.objects.create(name='Foo', line=LineString(coords))
qs = Track.objects.annotate(num_points=functions.NumPoints('line'))
self.assertEqual(qs.first().num_points, 2)
mpoly_qs = Country.objects.annotate(num_points=functions.NumPoints('mpoly'))
if not connection.features.supports_num_points_poly:
msg = 'NumPoints can only operate on LineString content on this database.'
with self.assertRaisesMessage(TypeError, msg):
list(mpoly_qs)
return
for c in mpoly_qs:
self.assertEqual(c.mpoly.num_points, c.num_points)
for c in City.objects.annotate(num_points=functions.NumPoints('point')):
self.assertEqual(c.num_points, 1)
@skipUnlessDBFeature("has_PointOnSurface_function")
def test_point_on_surface(self):
# Reference values.
if oracle:
# SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_GEOM.SDO_POINTONSURFACE(GEOAPP_COUNTRY.MPOLY, 0.05))
# FROM GEOAPP_COUNTRY;
ref = {'New Zealand': fromstr('POINT (174.616364 -36.100861)', srid=4326),
'Texas': fromstr('POINT (-103.002434 36.500397)', srid=4326),
}
else:
# Using GEOSGeometry to compute the reference point on surface values
# -- since PostGIS also uses GEOS these should be the same.
ref = {'New Zealand': Country.objects.get(name='New Zealand').mpoly.point_on_surface,
'Texas': Country.objects.get(name='Texas').mpoly.point_on_surface
}
qs = Country.objects.annotate(point_on_surface=functions.PointOnSurface('mpoly'))
for country in qs:
tol = 0.00001 # SpatiaLite might have WKT-translation-related precision issues
self.assertTrue(ref[country.name].equals_exact(country.point_on_surface, tol))
@skipUnlessDBFeature("has_Reverse_function")
def test_reverse_geom(self):
coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)]
Track.objects.create(name='Foo', line=LineString(coords))
track = Track.objects.annotate(reverse_geom=functions.Reverse('line')).get(name='Foo')
coords.reverse()
self.assertEqual(tuple(coords), track.reverse_geom.coords)
@skipUnlessDBFeature("has_Scale_function")
def test_scale(self):
xfac, yfac = 2, 3
tol = 5 # The low precision tolerance is for SpatiaLite
qs = Country.objects.annotate(scaled=functions.Scale('mpoly', xfac, yfac))
for country in qs:
for p1, p2 in zip(country.mpoly, country.scaled):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
self.assertAlmostEqual(c1[0] * xfac, c2[0], tol)
self.assertAlmostEqual(c1[1] * yfac, c2[1], tol)
# Test float/Decimal values
qs = Country.objects.annotate(scaled=functions.Scale('mpoly', 1.5, Decimal('2.5')))
self.assertGreater(qs[0].scaled.area, qs[0].mpoly.area)
@skipUnlessDBFeature("has_SnapToGrid_function")
def test_snap_to_grid(self):
# Let's try and break snap_to_grid() with bad combinations of arguments.
for bad_args in ((), range(3), range(5)):
with self.assertRaises(ValueError):
Country.objects.annotate(snap=functions.SnapToGrid('mpoly', *bad_args))
for bad_args in (('1.0',), (1.0, None), tuple(map(str, range(4)))):
with self.assertRaises(TypeError):
Country.objects.annotate(snap=functions.SnapToGrid('mpoly', *bad_args))
# Boundary for San Marino, courtesy of Bjorn Sandvik of thematicmapping.org
# from the world borders dataset he provides.
wkt = ('MULTIPOLYGON(((12.41580 43.95795,12.45055 43.97972,12.45389 43.98167,'
'12.46250 43.98472,12.47167 43.98694,12.49278 43.98917,'
'12.50555 43.98861,12.51000 43.98694,12.51028 43.98277,'
'12.51167 43.94333,12.51056 43.93916,12.49639 43.92333,'
'12.49500 43.91472,12.48778 43.90583,12.47444 43.89722,'
'12.46472 43.89555,12.45917 43.89611,12.41639 43.90472,'
'12.41222 43.90610,12.40782 43.91366,12.40389 43.92667,'
'12.40500 43.94833,12.40889 43.95499,12.41580 43.95795)))')
Country.objects.create(name='San Marino', mpoly=fromstr(wkt))
# Because floating-point arithmetic isn't exact, we set a tolerance
# to pass into GEOS `equals_exact`.
tol = 0.000000001
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.1)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 44,12.5 44,12.5 43.9,12.4 43.9,12.4 44)))')
self.assertTrue(
ref.equals_exact(
Country.objects.annotate(
snap=functions.SnapToGrid('mpoly', 0.1)
).get(name='San Marino').snap,
tol
)
)
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.05, 0.23)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 43.93,12.45 43.93,12.5 43.93,12.45 43.93,12.4 43.93)))')
self.assertTrue(
ref.equals_exact(
Country.objects.annotate(
snap=functions.SnapToGrid('mpoly', 0.05, 0.23)
).get(name='San Marino').snap,
tol
)
)
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.5, 0.17, 0.05, 0.23)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr(
'MULTIPOLYGON(((12.4 43.87,12.45 43.87,12.45 44.1,12.5 44.1,12.5 43.87,12.45 43.87,12.4 43.87)))'
)
self.assertTrue(
ref.equals_exact(
Country.objects.annotate(
snap=functions.SnapToGrid('mpoly', 0.05, 0.23, 0.5, 0.17)
).get(name='San Marino').snap,
tol
)
)
@skipUnlessDBFeature("has_SymDifference_function")
def test_sym_difference(self):
geom = Point(5, 23, srid=4326)
qs = Country.objects.annotate(sym_difference=functions.SymDifference('mpoly', geom))
# Oracle does something screwy with the Texas geometry.
if oracle:
qs = qs.exclude(name='Texas')
for country in qs:
self.assertTrue(country.mpoly.sym_difference(geom).equals(country.sym_difference))
@skipUnlessDBFeature("has_Transform_function")
def test_transform(self):
# Pre-transformed points for Houston and Pueblo.
ptown = fromstr('POINT(992363.390841912 481455.395105533)', srid=2774)
prec = 3 # Precision is low due to version variations in PROJ and GDAL.
# Asserting the result of the transform operation with the values in
# the pre-transformed points.
h = City.objects.annotate(pt=functions.Transform('point', ptown.srid)).get(name='Pueblo')
self.assertEqual(2774, h.pt.srid)
self.assertAlmostEqual(ptown.x, h.pt.x, prec)
self.assertAlmostEqual(ptown.y, h.pt.y, prec)
@skipUnlessDBFeature("has_Translate_function")
def test_translate(self):
xfac, yfac = 5, -23
qs = Country.objects.annotate(translated=functions.Translate('mpoly', xfac, yfac))
for c in qs:
for p1, p2 in zip(c.mpoly, c.translated):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
# The low precision is for SpatiaLite
self.assertAlmostEqual(c1[0] + xfac, c2[0], 5)
self.assertAlmostEqual(c1[1] + yfac, c2[1], 5)
# Some combined function tests
@skipUnlessDBFeature(
"has_Difference_function", "has_Intersection_function",
"has_SymDifference_function", "has_Union_function")
def test_diff_intersection_union(self):
geom = Point(5, 23, srid=4326)
qs = Country.objects.all().annotate(
difference=functions.Difference('mpoly', geom),
sym_difference=functions.SymDifference('mpoly', geom),
union=functions.Union('mpoly', geom),
intersection=functions.Intersection('mpoly', geom),
)
if oracle:
# Should be able to execute the queries; however, they won't be the same
# as GEOS (because Oracle doesn't use GEOS internally like PostGIS or
# SpatiaLite).
return
for c in qs:
self.assertTrue(c.mpoly.difference(geom).equals(c.difference))
if not (spatialite or mysql):
self.assertEqual(c.mpoly.intersection(geom), c.intersection)
self.assertTrue(c.mpoly.sym_difference(geom).equals(c.sym_difference))
self.assertTrue(c.mpoly.union(geom).equals(c.union))
@skipUnlessDBFeature("has_Union_function")
def test_union(self):
"""Union with all combinations of geometries/geometry fields."""
geom = Point(-95.363151, 29.763374, srid=4326)
union = City.objects.annotate(union=functions.Union('point', geom)).get(name='Dallas').union
expected = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)', srid=4326)
self.assertTrue(expected.equals(union))
union = City.objects.annotate(union=functions.Union(geom, 'point')).get(name='Dallas').union
self.assertTrue(expected.equals(union))
union = City.objects.annotate(union=functions.Union('point', 'point')).get(name='Dallas').union
expected = GEOSGeometry('POINT(-96.801611 32.782057)', srid=4326)
self.assertTrue(expected.equals(union))
union = City.objects.annotate(union=functions.Union(geom, geom)).get(name='Dallas').union
self.assertTrue(geom.equals(union))
@skipUnlessDBFeature("has_Union_function", "has_Transform_function")
def test_union_mixed_srid(self):
"""The result SRID depends on the order of parameters."""
geom = Point(61.42915, 55.15402, srid=4326)
geom_3857 = geom.transform(3857, clone=True)
tol = 0.001
for city in City.objects.annotate(union=functions.Union('point', geom_3857)):
expected = city.point | geom
self.assertTrue(city.union.equals_exact(expected, tol))
self.assertEqual(city.union.srid, 4326)
for city in City.objects.annotate(union=functions.Union(geom_3857, 'point')):
expected = geom_3857 | city.point.transform(3857, clone=True)
self.assertTrue(expected.equals_exact(city.union, tol))
self.assertEqual(city.union.srid, 3857)
def test_argument_validation(self):
with self.assertRaisesMessage(ValueError, 'SRID is required for all geometries.'):
City.objects.annotate(geo=functions.GeoFunc(Point(1, 1)))
msg = 'GeoFunc function requires a GeometryField in position 1, got CharField.'
with self.assertRaisesMessage(TypeError, msg):
City.objects.annotate(geo=functions.GeoFunc('name'))
msg = 'GeoFunc function requires a geometric argument in position 1.'
with self.assertRaisesMessage(TypeError, msg):
City.objects.annotate(union=functions.GeoFunc(1, 'point')).get(name='Dallas')
| {
"content_hash": "084fd5cc33d9149bad2663b5c06fa2ec",
"timestamp": "",
"source": "github",
"line_count": 565,
"max_line_length": 118,
"avg_line_length": 48.19823008849558,
"alnum_prop": 0.6243390129259695,
"repo_name": "taaviteska/django",
"id": "6e4a87f2126371f64e27971981ac37d91d0edf7b",
"size": "27232",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tests/gis_tests/geoapp/test_functions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55935"
},
{
"name": "HTML",
"bytes": "181561"
},
{
"name": "JavaScript",
"bytes": "252653"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11857432"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import sys
import subprocess
| {
"content_hash": "7a0239b96eefdffee281d0e88c4a988c",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 17,
"avg_line_length": 14.5,
"alnum_prop": 0.8620689655172413,
"repo_name": "theboocock/fine_mapping_pipeline",
"id": "1bf5e2847c9f59c7107b37561cf046e16573f2df",
"size": "55",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fine_mapping_pipeline/ucsc/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4843"
},
{
"name": "C++",
"bytes": "588622"
},
{
"name": "Jupyter Notebook",
"bytes": "2651"
},
{
"name": "Makefile",
"bytes": "1490"
},
{
"name": "Objective-C",
"bytes": "9465"
},
{
"name": "Python",
"bytes": "95656"
},
{
"name": "Shell",
"bytes": "384"
}
],
"symlink_target": ""
} |
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class DeleteAnnotationResponse:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'result': 'DeleteAnnotationResult',
'status': 'str',
'error_message': 'str',
'composedOn': 'int'
}
self.result = None # DeleteAnnotationResult
self.status = None # str
self.error_message = None # str
self.composedOn = None # int
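# --- Editor's sketch (hypothetical, not part of the generated file) ---
# The swaggerTypes map is what a generated swagger client typically
# walks when hydrating this object from a JSON payload; a minimal
# hand-rolled equivalent, for illustration only:
def _hydrate_delete_annotation_response(payload):
    response = DeleteAnnotationResponse()
    for name in response.swaggerTypes:
        # Nested types (e.g. DeleteAnnotationResult) are left as plain
        # dicts here; a real client would recurse using the type names.
        setattr(response, name, payload.get(name))
    return response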
| {
"content_hash": "2350b45ef0cd7c151c80c1383284046c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 30.864864864864863,
"alnum_prop": 0.648861646234676,
"repo_name": "liosha2007/temporary-groupdocs-python3-sdk",
"id": "75289f736c722d7b47edb5ccad3d840e8af5a54a",
"size": "1164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "groupdocs/models/DeleteAnnotationResponse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "992590"
}
],
"symlink_target": ""
} |
import pytest
from example.app.models import * # noqa
from model_mommy import mommy
from django.utils import timezone
@pytest.mark.django_db
def test_simple(es_client):
now = timezone.now()
test = SimpleObject.objects.create(
foo=1,
bar="Bar",
baz="baz",
published=now
)
assert test.to_dict() == {
"id": test.id,
"foo": 1,
"bar": "Bar",
"baz": "baz",
"published": now
}
@pytest.mark.django_db
def test_custom_field(es_client):
test = CustomFieldObject.objects.create(
color="#008E50"
)
assert test.to_dict() == {
"id": test.id,
"color": {
"red": "00",
"green": "8E",
"blue": "50"
}
}
@pytest.mark.django_db
def test_relatable(es_client):
simple = RelatedSimpleObject.objects.create(datums="Some datums")
nested = RelatedNestedObject.objects.create(denormalized_datums="Some denormalized datums")
test = RelatableObject.objects.create(
name="testing",
simple=simple,
nested=nested
)
assert test.to_dict() == {
"id": test.id,
"name": "testing",
"simple_id": simple.id,
"nested": {
"id": nested.id,
"denormalized_datums": "Some denormalized datums"
}
}
@pytest.mark.django_db
def test_poly_reference(es_client):
child_a = PolyChildA.objects.create(slug='slug', number=1)
child_b = PolyChildB.objects.create(album='st.anger', band_name='metallica')
parent_a = PolyParent.objects.get(id=child_a.id)
parent_b = PolyParent.objects.get(id=child_b.id)
poly_relationship_a = PolyRelationship.objects.create(poly_parent=parent_a)
assert poly_relationship_a.to_dict() == {
'poly_parent': {
'text': '',
'slug': 'slug',
'number': 1,
'polyparent_ptr_id': 1,
'id': 1
},
'id': 1
}
poly_relationship_b = PolyRelationship.objects.create(poly_parent=parent_b)
assert poly_relationship_b.to_dict() == {
'poly_parent': {
'album': 'st.anger',
'text': '',
'band_name': 'metallica',
'id': 2,
'polyparent_ptr_id': 2
},
'id': 2
}
@pytest.mark.django_db
def test_many_to_many(es_client):
tags = mommy.make(Tag, _quantity=3)
dumb_tags = mommy.make(DumbTag, _quantity=4)
test_object = mommy.make(RelationsTestObject, make_m2m=False)
test_object.tags.add(*tags)
test_object.dumb_tags.add(*dumb_tags)
document = test_object.to_dict()
assert document["id"] == test_object.id
assert document["data"] == test_object.data
assert len(document["tags"]) == 3
assert {"id": tags[0].id, "name": tags[0].name} in document["tags"]
# Not for now...
# assert len(document["dumb_tags"]) == 4
# assert dumb_tags[0].id in document["dumb_tags"]
@pytest.mark.django_db
def test_save_index():
content = SimpleObject.objects.create(foo=1)
# Added to index on create
SimpleObject.search_objects.refresh()
assert 1 == SimpleObject.search_objects.search().count()
# Remove From Index
content.save(index=False)
SimpleObject.search_objects.refresh()
assert 0 == SimpleObject.search_objects.search().count()
# Second save does not trigger 404/not-found-in-index
content.save(index=False)
SimpleObject.search_objects.refresh()
assert 0 == SimpleObject.search_objects.search().count()
# Re-insert into index
content.save(index=True)
SimpleObject.search_objects.refresh()
assert 1 == SimpleObject.search_objects.search().count()
| {
"content_hash": "398545327f56fc4e661032e64bd630f1",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 95,
"avg_line_length": 26.676258992805757,
"alnum_prop": 0.5957389428263214,
"repo_name": "theonion/djes",
"id": "353fff3c2fb79213cb5849ff6481ba68c27e013b",
"size": "3708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_document.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74133"
}
],
"symlink_target": ""
} |
from abc import abstractmethod
import numpy as np
from seizures.prediction.PredictorBase import PredictorBase
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
class AdaBoostTrees(PredictorBase):
    """
AdaBoost + Decision trees.
See http://scikit-learn.org/stable/auto_examples/ensemble/plot_adaboost_twoclass.html
@author: Wittawat
"""
def __init__(self, **options):
"""
options is a dictionary to be used as arguments to DecisionTreeClassifier.
"""
# No strong justification why max_depth=5. Change if needed.
self.clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=5, **options),
algorithm="SAMME",
n_estimators=100 )
def fit(self, X, y):
"""
Parameters:
X - 2d numpy array of training data. X.shape = [n_samples, d_features]
y - 1d numpy array of training labels
"""
print "fitting AdaBoost trees"
self.clf = self.clf.fit(X, y)
def predict(self, X):
"""
Method to apply the model data
Parameters:
X - 2d numpy array of test data
"""
# [:, 1] to get the second column, which contains the probabilies of
# of class being 1
return self.clf.predict_proba(X)[:, 1]
def __str__(self):
return "ABTrees"
def main():
N = 399
D = 20
X = np.random.rand(N, D)
y = np.random.randint(0, 2, N)
predictor = AdaBoostTrees(max_features=10)
predictor.fit(X, y)
x = np.random.rand(1, D)
pred = predictor.predict(x)
print pred
if __name__ == '__main__':
main()
| {
"content_hash": "09a09abd707e5b5d59d193694d1bea0c",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 90,
"avg_line_length": 26.390625,
"alnum_prop": 0.6068679692125518,
"repo_name": "vincentadam87/gatsby-hackathon-seizure",
"id": "0377e58e788036866b0c1acb2ea1ffbabed4101b",
"size": "1689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/python/seizures/prediction/Boosting.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Matlab",
"bytes": "603"
},
{
"name": "Python",
"bytes": "191528"
},
{
"name": "TeX",
"bytes": "4652"
}
],
"symlink_target": ""
} |
import datetime
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import *
from django.http import QueryDict
from django.template import Template, Context
from django.utils.datastructures import MultiValueDict, MergeDict
from django.utils.safestring import mark_safe
from django.utils.unittest import TestCase
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class PersonNew(Form):
first_name = CharField(widget=TextInput(attrs={'id': 'first_name_id'}))
last_name = CharField()
birthday = DateField()
class FormsTestCase(TestCase):
# A Form is a collection of Fields. It knows how to validate a set of data and it
# knows how to render itself in a couple of default ways (e.g., an HTML table).
# You can pass it data in __init__(), as a dictionary.
def test_form(self):
# Pass a dictionary to a Form's __init__().
p = Person({'first_name': u'John', 'last_name': u'Lennon', 'birthday': u'1940-10-9'})
self.assertTrue(p.is_bound)
self.assertEqual(p.errors, {})
self.assertTrue(p.is_valid())
self.assertEqual(p.errors.as_ul(), u'')
self.assertEqual(p.errors.as_text(), u'')
self.assertEqual(p.cleaned_data["first_name"], u'John')
self.assertEqual(p.cleaned_data["last_name"], u'Lennon')
self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9))
self.assertEqual(str(p['first_name']), '<input type="text" name="first_name" value="John" id="id_first_name" />')
self.assertEqual(str(p['last_name']), '<input type="text" name="last_name" value="Lennon" id="id_last_name" />')
self.assertEqual(str(p['birthday']), '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />')
try:
p['nonexistentfield']
self.fail('Attempts to access non-existent fields should fail.')
except KeyError:
pass
form_output = []
for boundfield in p:
form_output.append(str(boundfield))
self.assertEqual('\n'.join(form_output), """<input type="text" name="first_name" value="John" id="id_first_name" />
<input type="text" name="last_name" value="Lennon" id="id_last_name" />
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />""")
form_output = []
for boundfield in p:
form_output.append([boundfield.label, boundfield.data])
self.assertEqual(form_output, [
['First name', u'John'],
['Last name', u'Lennon'],
['Birthday', u'1940-10-9']
])
self.assertEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="Lennon" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>""")
def test_empty_dict(self):
# Empty dictionaries are valid, too.
p = Person({})
self.assertTrue(p.is_bound)
self.assertEqual(p.errors['first_name'], [u'This field is required.'])
self.assertEqual(p.errors['last_name'], [u'This field is required.'])
self.assertEqual(p.errors['birthday'], [u'This field is required.'])
self.assertFalse(p.is_valid())
try:
p.cleaned_data
self.fail('Attempts to access cleaned_data when validation fails should fail.')
except AttributeError:
pass
self.assertEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></li>""")
self.assertEqual(p.as_p(), """<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></p>""")
def test_unbound_form(self):
# If you don't pass any values to the Form's __init__(), or if you pass None,
# the Form will be considered unbound and won't do any validation. Form.errors
# will be an empty dictionary *but* Form.is_valid() will return False.
p = Person()
self.assertFalse(p.is_bound)
self.assertEqual(p.errors, {})
self.assertFalse(p.is_valid())
try:
p.cleaned_data
self.fail('Attempts to access cleaned_data when validation fails should fail.')
except AttributeError:
pass
self.assertEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertEqual(p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></li>""")
self.assertEqual(p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></p>""")
def test_unicode_values(self):
# Unicode values are handled properly.
p = Person({'first_name': u'John', 'last_name': u'\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111', 'birthday': '1940-10-9'})
self.assertEqual(p.as_table(), u'<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>\n<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></td></tr>\n<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>')
self.assertEqual(p.as_ul(), u'<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" value="John" id="id_first_name" /></li>\n<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></li>\n<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></li>')
self.assertEqual(p.as_p(), u'<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" value="John" id="id_first_name" /></p>\n<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></p>\n<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></p>')
p = Person({'last_name': u'Lennon'})
self.assertEqual(p.errors['first_name'], [u'This field is required.'])
self.assertEqual(p.errors['birthday'], [u'This field is required.'])
self.assertFalse(p.is_valid())
self.assertEqual(p.errors.as_ul(), u'<ul class="errorlist"><li>first_name<ul class="errorlist"><li>This field is required.</li></ul></li><li>birthday<ul class="errorlist"><li>This field is required.</li></ul></li></ul>')
self.assertEqual(p.errors.as_text(), """* first_name
* This field is required.
* birthday
* This field is required.""")
try:
p.cleaned_data
self.fail('Attempts to access cleaned_data when validation fails should fail.')
except AttributeError:
pass
self.assertEqual(p['first_name'].errors, [u'This field is required.'])
self.assertEqual(p['first_name'].errors.as_ul(), u'<ul class="errorlist"><li>This field is required.</li></ul>')
self.assertEqual(p['first_name'].errors.as_text(), u'* This field is required.')
p = Person()
self.assertEqual(str(p['first_name']), '<input type="text" name="first_name" id="id_first_name" />')
self.assertEqual(str(p['last_name']), '<input type="text" name="last_name" id="id_last_name" />')
self.assertEqual(str(p['birthday']), '<input type="text" name="birthday" id="id_birthday" />')
def test_cleaned_data_only_fields(self):
# cleaned_data will always *only* contain a key for fields defined in the
# Form, even if you pass extra data when you define the Form. In this
# example, we pass a bunch of extra fields to the form constructor,
# but cleaned_data contains only the form's fields.
data = {'first_name': u'John', 'last_name': u'Lennon', 'birthday': u'1940-10-9', 'extra1': 'hello', 'extra2': 'hello'}
p = Person(data)
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data['first_name'], u'John')
self.assertEqual(p.cleaned_data['last_name'], u'Lennon')
self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
def test_optional_data(self):
# cleaned_data will include a key and value for *all* fields defined in the Form,
# even if the Form's data didn't include a value for fields that are not
# required. In this example, the data dictionary doesn't include a value for the
# "nick_name" field, but cleaned_data includes it. For CharFields, it's set to the
# empty string.
class OptionalPersonForm(Form):
first_name = CharField()
last_name = CharField()
nick_name = CharField(required=False)
data = {'first_name': u'John', 'last_name': u'Lennon'}
f = OptionalPersonForm(data)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['nick_name'], u'')
self.assertEqual(f.cleaned_data['first_name'], u'John')
self.assertEqual(f.cleaned_data['last_name'], u'Lennon')
# For DateFields, it's set to None.
class OptionalPersonForm(Form):
first_name = CharField()
last_name = CharField()
birth_date = DateField(required=False)
data = {'first_name': u'John', 'last_name': u'Lennon'}
f = OptionalPersonForm(data)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['birth_date'], None)
self.assertEqual(f.cleaned_data['first_name'], u'John')
self.assertEqual(f.cleaned_data['last_name'], u'Lennon')
def test_auto_id(self):
# "auto_id" tells the Form to add an "id" attribute to each form element.
# If it's a string that contains '%s', Django will use that as a format string
# into which the field's name will be inserted. It will also put a <label> around
# the human-readable labels for a field.
p = Person(auto_id='%s_id')
self.assertEqual(p.as_table(), """<tr><th><label for="first_name_id">First name:</label></th><td><input type="text" name="first_name" id="first_name_id" /></td></tr>
<tr><th><label for="last_name_id">Last name:</label></th><td><input type="text" name="last_name" id="last_name_id" /></td></tr>
<tr><th><label for="birthday_id">Birthday:</label></th><td><input type="text" name="birthday" id="birthday_id" /></td></tr>""")
self.assertEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" /></li>
<li><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" /></li>
<li><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" /></li>""")
self.assertEqual(p.as_p(), """<p><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" /></p>
<p><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" /></p>
<p><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" /></p>""")
def test_auto_id_true(self):
# If auto_id is any True value whose str() does not contain '%s', the "id"
# attribute will be the name of the field.
p = Person(auto_id=True)
self.assertEqual(p.as_ul(), """<li><label for="first_name">First name:</label> <input type="text" name="first_name" id="first_name" /></li>
<li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" /></li>
<li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" /></li>""")
def test_auto_id_false(self):
# If auto_id is any False value, an "id" attribute won't be output unless it
# was manually entered.
p = Person(auto_id=False)
self.assertEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
def test_id_on_field(self):
# In this example, auto_id is False, but the "id" attribute for the "first_name"
# field is given. Also note that field gets a <label>, while the others don't.
p = PersonNew(auto_id=False)
self.assertEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
def test_auto_id_on_form_and_field(self):
# If the "id" attribute is specified in the Form and auto_id is True, the "id"
# attribute in the Form gets precedence.
p = PersonNew(auto_id=True)
self.assertEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" /></li>
<li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" /></li>
<li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" /></li>""")
def test_various_boolean_values(self):
class SignupForm(Form):
email = EmailField()
get_spam = BooleanField()
f = SignupForm(auto_id=False)
self.assertEqual(str(f['email']), '<input type="text" name="email" />')
self.assertEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
        f = SignupForm({'email': '[email protected]', 'get_spam': True}, auto_id=False)
        self.assertEqual(str(f['email']), '<input type="text" name="email" value="[email protected]" />')
        self.assertEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
        # A value of 'True' or 'true' should be rendered checked, without a value attribute.
        f = SignupForm({'email': '[email protected]', 'get_spam': 'True'}, auto_id=False)
        self.assertEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
        f = SignupForm({'email': '[email protected]', 'get_spam': 'true'}, auto_id=False)
        self.assertEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
        # A value of 'False' or 'false' should be rendered unchecked.
        f = SignupForm({'email': '[email protected]', 'get_spam': 'False'}, auto_id=False)
        self.assertEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
        f = SignupForm({'email': '[email protected]', 'get_spam': 'false'}, auto_id=False)
        self.assertEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
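        # A hedged sketch of the corresponding cleaned values (assumes the
        # string coercion demonstrated above carries through to cleaned_data):
        f = SignupForm({'email': '[email protected]', 'get_spam': 'true'}, auto_id=False)
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['get_spam'], True)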
def test_widget_output(self):
# Any Field can have a Widget class passed to its constructor:
class ContactForm(Form):
subject = CharField()
message = CharField(widget=Textarea)
f = ContactForm(auto_id=False)
self.assertEqual(str(f['subject']), '<input type="text" name="subject" />')
self.assertEqual(str(f['message']), '<textarea rows="10" cols="40" name="message"></textarea>')
# as_textarea(), as_text() and as_hidden() are shortcuts for changing the output
# widget type:
self.assertEqual(f['subject'].as_textarea(), u'<textarea rows="10" cols="40" name="subject"></textarea>')
self.assertEqual(f['message'].as_text(), u'<input type="text" name="message" />')
self.assertEqual(f['message'].as_hidden(), u'<input type="hidden" name="message" />')
# The 'widget' parameter to a Field can also be an instance:
class ContactForm(Form):
subject = CharField()
message = CharField(widget=Textarea(attrs={'rows': 80, 'cols': 20}))
f = ContactForm(auto_id=False)
self.assertEqual(str(f['message']), '<textarea rows="80" cols="20" name="message"></textarea>')
# Instance-level attrs are *not* carried over to as_textarea(), as_text() and
# as_hidden():
self.assertEqual(f['message'].as_text(), u'<input type="text" name="message" />')
f = ContactForm({'subject': 'Hello', 'message': 'I love you.'}, auto_id=False)
self.assertEqual(f['subject'].as_textarea(), u'<textarea rows="10" cols="40" name="subject">Hello</textarea>')
self.assertEqual(f['message'].as_text(), u'<input type="text" name="message" value="I love you." />')
self.assertEqual(f['message'].as_hidden(), u'<input type="hidden" name="message" value="I love you." />')
def test_forms_with_choices(self):
# For a form with a <select>, use ChoiceField:
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')])
f = FrameworkForm(auto_id=False)
self.assertEqual(str(f['language']), """<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
self.assertEqual(str(f['language']), """<select name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
        # A subtlety: If one of the choices' values is the empty string and the form is
        # unbound, then the <option> for the empty-string choice will get selected="selected".
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('', '------'), ('P', 'Python'), ('J', 'Java')])
f = FrameworkForm(auto_id=False)
self.assertEqual(str(f['language']), """<select name="language">
<option value="" selected="selected">------</option>
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
# You can specify widget attributes in the Widget constructor.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(attrs={'class': 'foo'}))
f = FrameworkForm(auto_id=False)
self.assertEqual(str(f['language']), """<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
self.assertEqual(str(f['language']), """<select class="foo" name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
# When passing a custom widget instance to ChoiceField, note that setting
# 'choices' on the widget is meaningless. The widget will use the choices
# defined on the Field, not the ones defined on the Widget.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(choices=[('R', 'Ruby'), ('P', 'Perl')], attrs={'class': 'foo'}))
f = FrameworkForm(auto_id=False)
self.assertEqual(str(f['language']), """<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
self.assertEqual(str(f['language']), """<select class="foo" name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
# You can set a ChoiceField's choices after the fact.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField()
f = FrameworkForm(auto_id=False)
self.assertEqual(str(f['language']), """<select name="language">
</select>""")
f.fields['language'].choices = [('P', 'Python'), ('J', 'Java')]
self.assertEqual(str(f['language']), """<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
def test_forms_with_radio(self):
# Add widget=RadioSelect to use that widget with a ChoiceField.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=RadioSelect)
f = FrameworkForm(auto_id=False)
self.assertEqual(str(f['language']), """<ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul>""")
self.assertEqual(f.as_table(), """<tr><th>Name:</th><td><input type="text" name="name" /></td></tr>
<tr><th>Language:</th><td><ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul></td></tr>""")
self.assertEqual(f.as_ul(), """<li>Name: <input type="text" name="name" /></li>
<li>Language: <ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul></li>""")
# Regarding auto_id and <label>, RadioSelect is a special case. Each radio button
# gets a distinct ID, formed by appending an underscore plus the button's
# zero-based index.
f = FrameworkForm(auto_id='id_%s')
self.assertEqual(str(f['language']), """<ul>
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul>""")
# When RadioSelect is used with auto_id, and the whole form is printed using
# either as_table() or as_ul(), the label for the RadioSelect will point to the
# ID of the *first* radio button.
self.assertEqual(f.as_table(), """<tr><th><label for="id_name">Name:</label></th><td><input type="text" name="name" id="id_name" /></td></tr>
<tr><th><label for="id_language_0">Language:</label></th><td><ul>
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></td></tr>""")
self.assertEqual(f.as_ul(), """<li><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></li>
<li><label for="id_language_0">Language:</label> <ul>
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></li>""")
self.assertEqual(f.as_p(), """<p><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></p>
<p><label for="id_language_0">Language:</label> <ul>
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></p>""")
def test_form_with_iterable_boundfield(self):
class BeatleForm(Form):
name = ChoiceField(choices=[('john', 'John'), ('paul', 'Paul'), ('george', 'George'), ('ringo', 'Ringo')], widget=RadioSelect)
f = BeatleForm(auto_id=False)
self.assertEqual('\n'.join([str(bf) for bf in f['name']]), """<label><input type="radio" name="name" value="john" /> John</label>
<label><input type="radio" name="name" value="paul" /> Paul</label>
<label><input type="radio" name="name" value="george" /> George</label>
<label><input type="radio" name="name" value="ringo" /> Ringo</label>""")
self.assertEqual('\n'.join(['<div>%s</div>' % bf for bf in f['name']]), """<div><label><input type="radio" name="name" value="john" /> John</label></div>
<div><label><input type="radio" name="name" value="paul" /> Paul</label></div>
<div><label><input type="radio" name="name" value="george" /> George</label></div>
<div><label><input type="radio" name="name" value="ringo" /> Ringo</label></div>""")
def test_form_with_noniterable_boundfield(self):
# You can iterate over any BoundField, not just those with widget=RadioSelect.
class BeatleForm(Form):
name = CharField()
f = BeatleForm(auto_id=False)
self.assertEqual('\n'.join([str(bf) for bf in f['name']]), u'<input type="text" name="name" />')
def test_forms_with_multiple_choice(self):
# MultipleChoiceField is a special case, as its data is required to be a list:
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField()
f = SongForm(auto_id=False)
self.assertEqual(str(f['composers']), """<select multiple="multiple" name="composers">
</select>""")
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')])
f = SongForm(auto_id=False)
self.assertEqual(str(f['composers']), """<select multiple="multiple" name="composers">
<option value="J">John Lennon</option>
<option value="P">Paul McCartney</option>
</select>""")
f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False)
self.assertEqual(str(f['name']), '<input type="text" name="name" value="Yesterday" />')
self.assertEqual(str(f['composers']), """<select multiple="multiple" name="composers">
<option value="J">John Lennon</option>
<option value="P" selected="selected">Paul McCartney</option>
</select>""")
def test_hidden_data(self):
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')])
# MultipleChoiceField rendered as_hidden() is a special case. Because it can
# have multiple values, its as_hidden() renders multiple <input type="hidden">
# tags.
f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False)
self.assertEqual(f['composers'].as_hidden(), '<input type="hidden" name="composers" value="P" />')
f = SongForm({'name': 'From Me To You', 'composers': ['P', 'J']}, auto_id=False)
self.assertEqual(f['composers'].as_hidden(), """<input type="hidden" name="composers" value="P" />
<input type="hidden" name="composers" value="J" />""")
        # SplitDateTimeField rendered as_hidden() is special, too.
class MessageForm(Form):
when = SplitDateTimeField()
f = MessageForm({'when_0': '1992-01-01', 'when_1': '01:01'})
self.assertTrue(f.is_valid())
self.assertEqual(str(f['when']), '<input type="text" name="when_0" value="1992-01-01" id="id_when_0" /><input type="text" name="when_1" value="01:01" id="id_when_1" />')
self.assertEqual(f['when'].as_hidden(), '<input type="hidden" name="when_0" value="1992-01-01" id="id_when_0" /><input type="hidden" name="when_1" value="01:01" id="id_when_1" />')
    def test_multiple_choice_checkbox(self):
# MultipleChoiceField can also be used with the CheckboxSelectMultiple widget.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
f = SongForm(auto_id=False)
self.assertEqual(str(f['composers']), """<ul>
<li><label><input type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
f = SongForm({'composers': ['J']}, auto_id=False)
self.assertEqual(str(f['composers']), """<ul>
<li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
f = SongForm({'composers': ['J', 'P']}, auto_id=False)
self.assertEqual(str(f['composers']), """<ul>
<li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input checked="checked" type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
def test_checkbox_auto_id(self):
# Regarding auto_id, CheckboxSelectMultiple is a special case. Each checkbox
# gets a distinct ID, formed by appending an underscore plus the checkbox's
# zero-based index.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
f = SongForm(auto_id='%s_id')
self.assertEqual(str(f['composers']), """<ul>
<li><label for="composers_id_0"><input type="checkbox" name="composers" value="J" id="composers_id_0" /> John Lennon</label></li>
<li><label for="composers_id_1"><input type="checkbox" name="composers" value="P" id="composers_id_1" /> Paul McCartney</label></li>
</ul>""")
def test_multiple_choice_list_data(self):
# Data for a MultipleChoiceField should be a list. QueryDict, MultiValueDict and
# MergeDict (when created as a merge of MultiValueDicts) conveniently work with
# this.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
data = {'name': 'Yesterday', 'composers': ['J', 'P']}
f = SongForm(data)
self.assertEqual(f.errors, {})
data = QueryDict('name=Yesterday&composers=J&composers=P')
f = SongForm(data)
self.assertEqual(f.errors, {})
data = MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P']))
f = SongForm(data)
self.assertEqual(f.errors, {})
data = MergeDict(MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P'])))
f = SongForm(data)
self.assertEqual(f.errors, {})
def test_multiple_hidden(self):
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
# The MultipleHiddenInput widget renders multiple values as hidden fields.
class SongFormHidden(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=MultipleHiddenInput)
f = SongFormHidden(MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P'])), auto_id=False)
self.assertEqual(f.as_ul(), """<li>Name: <input type="text" name="name" value="Yesterday" /><input type="hidden" name="composers" value="J" />
<input type="hidden" name="composers" value="P" /></li>""")
        # When using CheckboxSelectMultiple, the framework expects a list of
        # input values and returns a list of cleaned values.
f = SongForm({'name': 'Yesterday'}, auto_id=False)
self.assertEqual(f.errors['composers'], [u'This field is required.'])
f = SongForm({'name': 'Yesterday', 'composers': ['J']}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['composers'], [u'J'])
self.assertEqual(f.cleaned_data['name'], u'Yesterday')
f = SongForm({'name': 'Yesterday', 'composers': ['J', 'P']}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['composers'], [u'J', u'P'])
self.assertEqual(f.cleaned_data['name'], u'Yesterday')
def test_escaping(self):
# Validation errors are HTML-escaped when output as HTML.
class EscapingForm(Form):
special_name = CharField(label="<em>Special</em> Field")
special_safe_name = CharField(label=mark_safe("<em>Special</em> Field"))
def clean_special_name(self):
raise ValidationError("Something's wrong with '%s'" % self.cleaned_data['special_name'])
def clean_special_safe_name(self):
raise ValidationError(mark_safe("'<b>%s</b>' is a safe string" % self.cleaned_data['special_safe_name']))
f = EscapingForm({'special_name': "Nothing to escape", 'special_safe_name': "Nothing to escape"}, auto_id=False)
self.assertEqual(f.as_table(), """<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>Something's wrong with 'Nothing to escape'</li></ul><input type="text" name="special_name" value="Nothing to escape" /></td></tr>
<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>'<b>Nothing to escape</b>' is a safe string</li></ul><input type="text" name="special_safe_name" value="Nothing to escape" /></td></tr>""")
f = EscapingForm({
'special_name': "Should escape < & > and <script>alert('xss')</script>",
'special_safe_name': "<i>Do not escape</i>"
}, auto_id=False)
self.assertEqual(f.as_table(), """<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>Something's wrong with 'Should escape < & > and <script>alert('xss')</script>'</li></ul><input type="text" name="special_name" value="Should escape < & > and <script>alert('xss')</script>" /></td></tr>
<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>'<b><i>Do not escape</i></b>' is a safe string</li></ul><input type="text" name="special_safe_name" value="<i>Do not escape</i>" /></td></tr>""")
def test_validating_multiple_fields(self):
# There are a couple of ways to do multiple-field validation. If you want the
# validation message to be associated with a particular field, implement the
# clean_XXX() method on the Form, where XXX is the field name. As in
# Field.clean(), the clean_XXX() method should return the cleaned value. In the
        # clean_XXX() method, you have access to self.cleaned_data, a dictionary
        # containing all the data cleaned *so far*, in field-definition order, up
        # to and including the current field (e.g., the field XXX if you're in
        # clean_XXX()).
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean_password2(self):
if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise ValidationError(u'Please make sure your passwords match.')
return self.cleaned_data['password2']
f = UserRegistration(auto_id=False)
self.assertEqual(f.errors, {})
f = UserRegistration({}, auto_id=False)
self.assertEqual(f.errors['username'], [u'This field is required.'])
self.assertEqual(f.errors['password1'], [u'This field is required.'])
self.assertEqual(f.errors['password2'], [u'This field is required.'])
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
self.assertEqual(f.errors['password2'], [u'Please make sure your passwords match.'])
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['username'], u'adrian')
self.assertEqual(f.cleaned_data['password1'], u'foo')
self.assertEqual(f.cleaned_data['password2'], u'foo')
# Another way of doing multiple-field validation is by implementing the
# Form's clean() method. If you do this, any ValidationError raised by that
# method will not be associated with a particular field; it will have a
# special-case association with the field named '__all__'.
# Note that in Form.clean(), you have access to self.cleaned_data, a dictionary of
# all the fields/values that have *not* raised a ValidationError. Also note
# Form.clean() is required to return a dictionary of all clean data.
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise ValidationError(u'Please make sure your passwords match.')
return self.cleaned_data
f = UserRegistration(auto_id=False)
self.assertEqual(f.errors, {})
f = UserRegistration({}, auto_id=False)
self.assertEqual(f.as_table(), """<tr><th>Username:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="password" name="password2" /></td></tr>""")
self.assertEqual(f.errors['username'], [u'This field is required.'])
self.assertEqual(f.errors['password1'], [u'This field is required.'])
self.assertEqual(f.errors['password2'], [u'This field is required.'])
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
self.assertEqual(f.errors['__all__'], [u'Please make sure your passwords match.'])
self.assertEqual(f.as_table(), """<tr><td colspan="2"><ul class="errorlist"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><input type="text" name="username" value="adrian" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>""")
self.assertEqual(f.as_ul(), """<li><ul class="errorlist"><li>Please make sure your passwords match.</li></ul></li>
<li>Username: <input type="text" name="username" value="adrian" maxlength="10" /></li>
<li>Password1: <input type="password" name="password1" /></li>
<li>Password2: <input type="password" name="password2" /></li>""")
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['username'], u'adrian')
self.assertEqual(f.cleaned_data['password1'], u'foo')
self.assertEqual(f.cleaned_data['password2'], u'foo')
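        # A hedged sketch: errors keyed on '__all__' are also reachable through
        # the form's non_field_errors() accessor.
        f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
        self.assertEqual(f.non_field_errors(), [u'Please make sure your passwords match.'])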
def test_dynamic_construction(self):
# It's possible to construct a Form dynamically by adding to the self.fields
# dictionary in __init__(). Don't forget to call Form.__init__() within the
# subclass' __init__().
class Person(Form):
first_name = CharField()
last_name = CharField()
def __init__(self, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
self.fields['birthday'] = DateField()
p = Person(auto_id=False)
self.assertEqual(p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" /></td></tr>""")
# Instances of a dynamic Form do not persist fields from one Form instance to
# the next.
class MyForm(Form):
def __init__(self, data=None, auto_id=False, field_list=[]):
Form.__init__(self, data, auto_id=auto_id)
for field in field_list:
self.fields[field[0]] = field[1]
field_list = [('field1', CharField()), ('field2', CharField())]
my_form = MyForm(field_list=field_list)
self.assertEqual(my_form.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>""")
field_list = [('field3', CharField()), ('field4', CharField())]
my_form = MyForm(field_list=field_list)
self.assertEqual(my_form.as_table(), """<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>""")
class MyForm(Form):
default_field_1 = CharField()
default_field_2 = CharField()
def __init__(self, data=None, auto_id=False, field_list=[]):
Form.__init__(self, data, auto_id=auto_id)
for field in field_list:
self.fields[field[0]] = field[1]
field_list = [('field1', CharField()), ('field2', CharField())]
my_form = MyForm(field_list=field_list)
self.assertEqual(my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr>
<tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr>
<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>""")
field_list = [('field3', CharField()), ('field4', CharField())]
my_form = MyForm(field_list=field_list)
self.assertEqual(my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr>
<tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr>
<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>""")
# Similarly, changes to field attributes do not persist from one Form instance
# to the next.
class Person(Form):
first_name = CharField(required=False)
last_name = CharField(required=False)
def __init__(self, names_required=False, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if names_required:
self.fields['first_name'].required = True
self.fields['first_name'].widget.attrs['class'] = 'required'
self.fields['last_name'].required = True
self.fields['last_name'].widget.attrs['class'] = 'required'
f = Person(names_required=False)
        self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (False, False))
        self.assertEqual((f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs), ({}, {}))
        f = Person(names_required=True)
        self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (True, True))
        self.assertEqual((f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs), ({'class': 'required'}, {'class': 'required'}))
        f = Person(names_required=False)
        self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (False, False))
        self.assertEqual((f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs), ({}, {}))
class Person(Form):
first_name = CharField(max_length=30)
last_name = CharField(max_length=30)
def __init__(self, name_max_length=None, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if name_max_length:
self.fields['first_name'].max_length = name_max_length
self.fields['last_name'].max_length = name_max_length
f = Person(name_max_length=None)
        self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (30, 30))
        f = Person(name_max_length=20)
        self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (20, 20))
        f = Person(name_max_length=None)
        self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (30, 30))
# Similarly, choices do not persist from one Form instance to the next.
# Refs #15127.
class Person(Form):
first_name = CharField(required=False)
last_name = CharField(required=False)
gender = ChoiceField(choices=(('f', 'Female'), ('m', 'Male')))
def __init__(self, allow_unspec_gender=False, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if allow_unspec_gender:
self.fields['gender'].choices += (('u', 'Unspecified'),)
f = Person()
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])
f = Person(allow_unspec_gender=True)
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male'), ('u', 'Unspecified')])
f = Person()
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])
def test_validators_independence(self):
""" Test that we are able to modify a form field validators list without polluting
other forms """
from django.core.validators import MaxValueValidator
class MyForm(Form):
myfield = CharField(max_length=25)
f1 = MyForm()
f2 = MyForm()
f1.fields['myfield'].validators[0] = MaxValueValidator(12)
self.assertFalse(f1.fields['myfield'].validators[0] == f2.fields['myfield'].validators[0])
def test_hidden_widget(self):
        # HiddenInput widgets are displayed differently in the as_table(), as_ul()
# and as_p() output of a Form -- their verbose names are not displayed, and a
# separate row is not displayed. They're displayed in the last row of the
# form, directly after that row's form element.
class Person(Form):
first_name = CharField()
last_name = CharField()
hidden_text = CharField(widget=HiddenInput)
birthday = DateField()
p = Person(auto_id=False)
self.assertEqual(p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></td></tr>""")
self.assertEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></li>""")
self.assertEqual(p.as_p(), """<p>First name: <input type="text" name="first_name" /></p>
<p>Last name: <input type="text" name="last_name" /></p>
<p>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></p>""")
# With auto_id set, a HiddenInput still gets an ID, but it doesn't get a label.
p = Person(auto_id='id_%s')
self.assertEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></td></tr>""")
self.assertEqual(p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></li>""")
self.assertEqual(p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></p>""")
# If a field with a HiddenInput has errors, the as_table() and as_ul() output
# will include the error message(s) with the text "(Hidden field [fieldname]) "
# prepended. This message is displayed at the top of the output, regardless of
# its field's order in the form.
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'}, auto_id=False)
self.assertEqual(p.as_table(), """<tr><td colspan="2"><ul class="errorlist"><li>(Hidden field hidden_text) This field is required.</li></ul></td></tr>
<tr><th>First name:</th><td><input type="text" name="first_name" value="John" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" value="Lennon" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></td></tr>""")
self.assertEqual(p.as_ul(), """<li><ul class="errorlist"><li>(Hidden field hidden_text) This field is required.</li></ul></li>
<li>First name: <input type="text" name="first_name" value="John" /></li>
<li>Last name: <input type="text" name="last_name" value="Lennon" /></li>
<li>Birthday: <input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></li>""")
self.assertEqual(p.as_p(), """<ul class="errorlist"><li>(Hidden field hidden_text) This field is required.</li></ul>
<p>First name: <input type="text" name="first_name" value="John" /></p>
<p>Last name: <input type="text" name="last_name" value="Lennon" /></p>
<p>Birthday: <input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></p>""")
# A corner case: It's possible for a form to have only HiddenInputs.
class TestForm(Form):
foo = CharField(widget=HiddenInput)
bar = CharField(widget=HiddenInput)
p = TestForm(auto_id=False)
self.assertEqual(p.as_table(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
self.assertEqual(p.as_ul(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
self.assertEqual(p.as_p(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
def test_field_order(self):
# A Form's fields are displayed in the same order in which they were defined.
class TestForm(Form):
field1 = CharField()
field2 = CharField()
field3 = CharField()
field4 = CharField()
field5 = CharField()
field6 = CharField()
field7 = CharField()
field8 = CharField()
field9 = CharField()
field10 = CharField()
field11 = CharField()
field12 = CharField()
field13 = CharField()
field14 = CharField()
p = TestForm(auto_id=False)
self.assertEqual(p.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>
<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>
<tr><th>Field5:</th><td><input type="text" name="field5" /></td></tr>
<tr><th>Field6:</th><td><input type="text" name="field6" /></td></tr>
<tr><th>Field7:</th><td><input type="text" name="field7" /></td></tr>
<tr><th>Field8:</th><td><input type="text" name="field8" /></td></tr>
<tr><th>Field9:</th><td><input type="text" name="field9" /></td></tr>
<tr><th>Field10:</th><td><input type="text" name="field10" /></td></tr>
<tr><th>Field11:</th><td><input type="text" name="field11" /></td></tr>
<tr><th>Field12:</th><td><input type="text" name="field12" /></td></tr>
<tr><th>Field13:</th><td><input type="text" name="field13" /></td></tr>
<tr><th>Field14:</th><td><input type="text" name="field14" /></td></tr>""")
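        # A hedged sketch, assuming this Django version keeps fields in a
        # SortedDict whose keyOrder attribute lists the definition order:
        self.assertEqual(p.fields.keyOrder[:3], ['field1', 'field2', 'field3'])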
def test_form_html_attributes(self):
# Some Field classes have an effect on the HTML attributes of their associated
# Widget. If you set max_length in a CharField and its associated widget is
# either a TextInput or PasswordInput, then the widget's rendered HTML will
# include the "maxlength" attribute.
class UserRegistration(Form):
username = CharField(max_length=10) # uses TextInput by default
password = CharField(max_length=10, widget=PasswordInput)
realname = CharField(max_length=10, widget=TextInput) # redundantly define widget, just to test
address = CharField() # no max_length defined here
p = UserRegistration(auto_id=False)
self.assertEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" maxlength="10" /></li>
<li>Realname: <input type="text" name="realname" maxlength="10" /></li>
<li>Address: <input type="text" name="address" /></li>""")
# If you specify a custom "attrs" that includes the "maxlength" attribute,
# the Field's max_length attribute will override whatever "maxlength" you specify
# in "attrs".
class UserRegistration(Form):
username = CharField(max_length=10, widget=TextInput(attrs={'maxlength': 20}))
password = CharField(max_length=10, widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" maxlength="10" /></li>""")
def test_specifying_labels(self):
# You can specify the label for a field by using the 'label' argument to a Field
# class. If you don't specify 'label', Django will use the field name with
# underscores converted to spaces, and the initial letter capitalized.
class UserRegistration(Form):
username = CharField(max_length=10, label='Your username')
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput, label='Password (again)')
p = UserRegistration(auto_id=False)
self.assertEqual(p.as_ul(), """<li>Your username: <input type="text" name="username" maxlength="10" /></li>
<li>Password1: <input type="password" name="password1" /></li>
<li>Password (again): <input type="password" name="password2" /></li>""")
# Labels for as_* methods will only end in a colon if they don't end in other
# punctuation already.
class Questions(Form):
q1 = CharField(label='The first question')
q2 = CharField(label='What is your name?')
q3 = CharField(label='The answer to life is:')
q4 = CharField(label='Answer this question!')
q5 = CharField(label='The last question. Period.')
self.assertEqual(Questions(auto_id=False).as_p(), """<p>The first question: <input type="text" name="q1" /></p>
<p>What is your name? <input type="text" name="q2" /></p>
<p>The answer to life is: <input type="text" name="q3" /></p>
<p>Answer this question! <input type="text" name="q4" /></p>
<p>The last question. Period. <input type="text" name="q5" /></p>""")
self.assertEqual(Questions().as_p(), """<p><label for="id_q1">The first question:</label> <input type="text" name="q1" id="id_q1" /></p>
<p><label for="id_q2">What is your name?</label> <input type="text" name="q2" id="id_q2" /></p>
<p><label for="id_q3">The answer to life is:</label> <input type="text" name="q3" id="id_q3" /></p>
<p><label for="id_q4">Answer this question!</label> <input type="text" name="q4" id="id_q4" /></p>
<p><label for="id_q5">The last question. Period.</label> <input type="text" name="q5" id="id_q5" /></p>""")
# A label can be a Unicode object or a bytestring with special characters.
class UserRegistration(Form):
username = CharField(max_length=10, label='ŠĐĆŽćžšđ')
password = CharField(widget=PasswordInput, label=u'\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
p = UserRegistration(auto_id=False)
self.assertEqual(p.as_ul(), u'<li>\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111: <input type="text" name="username" maxlength="10" /></li>\n<li>\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111: <input type="password" name="password" /></li>')
# If a label is set to the empty string for a field, that field won't get a label.
class UserRegistration(Form):
username = CharField(max_length=10, label='')
password = CharField(widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertEqual(p.as_ul(), """<li> <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
p = UserRegistration(auto_id='id_%s')
self.assertEqual(p.as_ul(), """<li> <input id="id_username" type="text" name="username" maxlength="10" /></li>
<li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>""")
        # If label is None, Django will auto-create the label from the field name.
        # This is the default behavior.
class UserRegistration(Form):
username = CharField(max_length=10, label=None)
password = CharField(widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
p = UserRegistration(auto_id='id_%s')
self.assertEqual(p.as_ul(), """<li><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></li>
<li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>""")
def test_label_suffix(self):
# You can specify the 'label_suffix' argument to a Form class to modify the
# punctuation symbol used at the end of a label. By default, the colon (:) is
# used, and is only appended to the label if the label doesn't already end with a
# punctuation symbol: ., !, ? or :. If you specify a different suffix, it will
# be appended regardless of the last character of the label.
class FavoriteForm(Form):
color = CharField(label='Favorite color?')
animal = CharField(label='Favorite animal')
f = FavoriteForm(auto_id=False)
self.assertEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal: <input type="text" name="animal" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix='?')
self.assertEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal? <input type="text" name="animal" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix='')
self.assertEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal <input type="text" name="animal" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix=u'\u2192')
self.assertEqual(f.as_ul(), u'<li>Favorite color? <input type="text" name="color" /></li>\n<li>Favorite animal\u2192 <input type="text" name="animal" /></li>')
def test_initial_data(self):
# You can specify initial data for a field by using the 'initial' argument to a
# Field class. This initial data is displayed when a Form is rendered with *no*
# data. It is not displayed when a Form is rendered with any data (including an
# empty dictionary). Also, the initial value is *not* used if data for a
# particular required field isn't provided.
class UserRegistration(Form):
username = CharField(max_length=10, initial='django')
password = CharField(widget=PasswordInput)
        # Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(auto_id=False)
self.assertEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
# Here, we're submitting data, so the initial value will *not* be displayed.
p = UserRegistration({}, auto_id=False)
self.assertEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
p = UserRegistration({'username': u''}, auto_id=False)
self.assertEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
p = UserRegistration({'username': u'foo'}, auto_id=False)
self.assertEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
# An 'initial' value is *not* used as a fallback if data is not provided. In this
# example, we don't provide a value for 'username', and the form raises a
# validation error rather than using the initial value for 'username'.
p = UserRegistration({'password': 'secret'})
self.assertEqual(p.errors['username'], [u'This field is required.'])
self.assertFalse(p.is_valid())
def test_dynamic_initial_data(self):
# The previous technique dealt with "hard-coded" initial data, but it's also
# possible to specify initial data after you've already created the Form class
# (i.e., at runtime). Use the 'initial' parameter to the Form constructor. This
# should be a dictionary containing initial values for one or more fields in the
# form, keyed by field name.
class UserRegistration(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
        # Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(initial={'username': 'django'}, auto_id=False)
self.assertEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
p = UserRegistration(initial={'username': 'stephane'}, auto_id=False)
self.assertEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
# The 'initial' parameter is meaningless if you pass data.
p = UserRegistration({}, initial={'username': 'django'}, auto_id=False)
self.assertEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
p = UserRegistration({'username': u''}, initial={'username': 'django'}, auto_id=False)
self.assertEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
p = UserRegistration({'username': u'foo'}, initial={'username': 'django'}, auto_id=False)
self.assertEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
# A dynamic 'initial' value is *not* used as a fallback if data is not provided.
# In this example, we don't provide a value for 'username', and the form raises a
# validation error rather than using the initial value for 'username'.
p = UserRegistration({'password': 'secret'}, initial={'username': 'django'})
self.assertEqual(p.errors['username'], [u'This field is required.'])
self.assertFalse(p.is_valid())
# If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
# then the latter will get precedence.
class UserRegistration(Form):
username = CharField(max_length=10, initial='django')
password = CharField(widget=PasswordInput)
p = UserRegistration(initial={'username': 'babik'}, auto_id=False)
self.assertEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="babik" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
def test_callable_initial_data(self):
# The previous technique dealt with raw values as initial data, but it's also
# possible to specify callable data.
class UserRegistration(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
options = MultipleChoiceField(choices=[('f','foo'),('b','bar'),('w','whiz')])
        # We need to define functions that get called later.
def initial_django():
return 'django'
def initial_stephane():
return 'stephane'
def initial_options():
return ['f','b']
def initial_other_options():
return ['b','w']
        # Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(initial={'username': initial_django, 'options': initial_options}, auto_id=False)
self.assertEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
# The 'initial' parameter is meaningless if you pass data.
p = UserRegistration({}, initial={'username': initial_django, 'options': initial_options}, auto_id=False)
self.assertEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b">bar</option>
<option value="w">whiz</option>
</select></li>""")
p = UserRegistration({'username': u''}, initial={'username': initial_django}, auto_id=False)
self.assertEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b">bar</option>
<option value="w">whiz</option>
</select></li>""")
p = UserRegistration({'username': u'foo', 'options':['f','b']}, initial={'username': initial_django}, auto_id=False)
self.assertEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
# A callable 'initial' value is *not* used as a fallback if data is not provided.
# In this example, we don't provide a value for 'username', and the form raises a
# validation error rather than using the initial value for 'username'.
p = UserRegistration({'password': 'secret'}, initial={'username': initial_django, 'options': initial_options})
self.assertEqual(p.errors['username'], [u'This field is required.'])
self.assertFalse(p.is_valid())
# If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
# then the latter will get precedence.
class UserRegistration(Form):
username = CharField(max_length=10, initial=initial_django)
password = CharField(widget=PasswordInput)
options = MultipleChoiceField(choices=[('f','foo'),('b','bar'),('w','whiz')], initial=initial_other_options)
p = UserRegistration(auto_id=False)
self.assertEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w" selected="selected">whiz</option>
</select></li>""")
p = UserRegistration(initial={'username': initial_stephane, 'options': initial_options}, auto_id=False)
self.assertEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
def test_boundfield_values(self):
        # It's possible to get the value that would be used for rendering
        # a field's widget by using the BoundField's value() method.
class UserRegistration(Form):
username = CharField(max_length=10, initial='djangonaut')
password = CharField(widget=PasswordInput)
unbound = UserRegistration()
bound = UserRegistration({'password': 'foo'})
self.assertEqual(bound['username'].value(), None)
self.assertEqual(unbound['username'].value(), 'djangonaut')
self.assertEqual(bound['password'].value(), 'foo')
self.assertEqual(unbound['password'].value(), None)
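        # A hedged sketch: a form-level 'initial' takes precedence over the
        # field's own initial when computing an unbound BoundField's value().
        unbound2 = UserRegistration(initial={'username': 'form-level'})
        self.assertEqual(unbound2['username'].value(), 'form-level')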
def test_help_text(self):
        # You can specify descriptive text for a field by using the 'help_text' argument.
class UserRegistration(Form):
            username = CharField(max_length=10, help_text='e.g., [email protected]')
password = CharField(widget=PasswordInput, help_text='Choose wisely.')
p = UserRegistration(auto_id=False)
self.assertEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></li>
<li>Password: <input type="password" name="password" /> <span class="helptext">Choose wisely.</span></li>""")
self.assertEqual(p.as_p(), """<p>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></p>
<p>Password: <input type="password" name="password" /> <span class="helptext">Choose wisely.</span></p>""")
self.assertEqual(p.as_table(), """<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /><br /><span class="helptext">e.g., [email protected]</span></td></tr>
<tr><th>Password:</th><td><input type="password" name="password" /><br /><span class="helptext">Choose wisely.</span></td></tr>""")
# The help text is displayed whether or not data is provided for the form.
p = UserRegistration({'username': u'foo'}, auto_id=False)
self.assertEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /> <span class="helptext">Choose wisely.</span></li>""")
# help_text is not displayed for hidden fields. It can be used for documentation
# purposes, though.
class UserRegistration(Form):
            username = CharField(max_length=10, help_text='e.g., [email protected]')
password = CharField(widget=PasswordInput)
next = CharField(widget=HiddenInput, initial='/', help_text='Redirect destination')
p = UserRegistration(auto_id=False)
self.assertEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></li>
<li>Password: <input type="password" name="password" /><input type="hidden" name="next" value="/" /></li>""")
# Help text can include arbitrary Unicode characters.
class UserRegistration(Form):
username = CharField(max_length=10, help_text='ŠĐĆŽćžšđ')
p = UserRegistration(auto_id=False)
self.assertEqual(p.as_ul(), u'<li>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111</span></li>')
def test_subclassing_forms(self):
# You can subclass a Form to add fields. The resulting form subclass will have
# all of the fields of the parent Form, plus whichever fields you define in the
# subclass.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class Musician(Person):
instrument = CharField()
p = Person(auto_id=False)
self.assertEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
m = Musician(auto_id=False)
self.assertEqual(m.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
<li>Instrument: <input type="text" name="instrument" /></li>""")
# Yes, you can subclass multiple forms. The fields are added in the order in
# which the parent classes are listed.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class Instrument(Form):
instrument = CharField()
class Beatle(Person, Instrument):
haircut_type = CharField()
b = Beatle(auto_id=False)
self.assertEqual(b.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
<li>Instrument: <input type="text" name="instrument" /></li>
<li>Haircut type: <input type="text" name="haircut_type" /></li>""")
def test_forms_with_prefixes(self):
# Sometimes it's necessary to have multiple forms display on the same HTML page,
# or multiple copies of the same form. We can accomplish this with form prefixes.
# Pass the keyword argument 'prefix' to the Form constructor to use this feature.
# This value will be prepended to each HTML form field name. One way to think
# about this is "namespaces for HTML forms". Notice that in the data argument,
# each field's key has the prefix, in this case 'person1', prepended to the
# actual field name.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
data = {
'person1-first_name': u'John',
'person1-last_name': u'Lennon',
'person1-birthday': u'1940-10-9'
}
p = Person(data, prefix='person1')
self.assertEqual(p.as_ul(), """<li><label for="id_person1-first_name">First name:</label> <input type="text" name="person1-first_name" value="John" id="id_person1-first_name" /></li>
<li><label for="id_person1-last_name">Last name:</label> <input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" /></li>
<li><label for="id_person1-birthday">Birthday:</label> <input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" /></li>""")
self.assertEqual(str(p['first_name']), '<input type="text" name="person1-first_name" value="John" id="id_person1-first_name" />')
self.assertEqual(str(p['last_name']), '<input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" />')
self.assertEqual(str(p['birthday']), '<input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" />')
self.assertEqual(p.errors, {})
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data['first_name'], u'John')
self.assertEqual(p.cleaned_data['last_name'], u'Lennon')
self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
# Let's try submitting some bad data to make sure form.errors and field.errors
# work as expected.
data = {
'person1-first_name': u'',
'person1-last_name': u'',
'person1-birthday': u''
}
p = Person(data, prefix='person1')
self.assertEqual(p.errors['first_name'], [u'This field is required.'])
self.assertEqual(p.errors['last_name'], [u'This field is required.'])
self.assertEqual(p.errors['birthday'], [u'This field is required.'])
self.assertEqual(p['first_name'].errors, [u'This field is required.'])
try:
p['person1-first_name'].errors
self.fail('Attempts to access non-existent fields should fail.')
except KeyError:
pass
# In this example, the data doesn't have a prefix, but the form requires it, so
# the form doesn't "see" the fields.
data = {
'first_name': u'John',
'last_name': u'Lennon',
'birthday': u'1940-10-9'
}
p = Person(data, prefix='person1')
self.assertEqual(p.errors['first_name'], [u'This field is required.'])
self.assertEqual(p.errors['last_name'], [u'This field is required.'])
self.assertEqual(p.errors['birthday'], [u'This field is required.'])
# With prefixes, a single data dictionary can hold data for multiple instances
# of the same form.
data = {
'person1-first_name': u'John',
'person1-last_name': u'Lennon',
'person1-birthday': u'1940-10-9',
'person2-first_name': u'Jim',
'person2-last_name': u'Morrison',
'person2-birthday': u'1943-12-8'
}
p1 = Person(data, prefix='person1')
self.assertTrue(p1.is_valid())
self.assertEqual(p1.cleaned_data['first_name'], u'John')
self.assertEqual(p1.cleaned_data['last_name'], u'Lennon')
self.assertEqual(p1.cleaned_data['birthday'], datetime.date(1940, 10, 9))
p2 = Person(data, prefix='person2')
self.assertTrue(p2.is_valid())
self.assertEqual(p2.cleaned_data['first_name'], u'Jim')
self.assertEqual(p2.cleaned_data['last_name'], u'Morrison')
self.assertEqual(p2.cleaned_data['birthday'], datetime.date(1943, 12, 8))
# By default, forms append a hyphen between the prefix and the field name, but a
# form can alter that behavior by implementing the add_prefix() method. This
# method takes a field name and returns the prefixed field, according to
# self.prefix.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
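            # The and/or chain below is the old-style conditional expression:
            # it returns the prefixed name when self.prefix is set, and the
            # bare field name otherwise.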
def add_prefix(self, field_name):
return self.prefix and '%s-prefix-%s' % (self.prefix, field_name) or field_name
p = Person(prefix='foo')
self.assertEqual(p.as_ul(), """<li><label for="id_foo-prefix-first_name">First name:</label> <input type="text" name="foo-prefix-first_name" id="id_foo-prefix-first_name" /></li>
<li><label for="id_foo-prefix-last_name">Last name:</label> <input type="text" name="foo-prefix-last_name" id="id_foo-prefix-last_name" /></li>
<li><label for="id_foo-prefix-birthday">Birthday:</label> <input type="text" name="foo-prefix-birthday" id="id_foo-prefix-birthday" /></li>""")
data = {
'foo-prefix-first_name': u'John',
'foo-prefix-last_name': u'Lennon',
'foo-prefix-birthday': u'1940-10-9'
}
p = Person(data, prefix='foo')
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data['first_name'], u'John')
self.assertEqual(p.cleaned_data['last_name'], u'Lennon')
self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
def test_forms_with_null_boolean(self):
# NullBooleanField is a bit of a special case because its presentation (widget)
# is different than its data. This is handled transparently, though.
class Person(Form):
name = CharField()
is_cool = NullBooleanField()
p = Person({'name': u'Joe'}, auto_id=False)
self.assertEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': u'Joe', 'is_cool': u'1'}, auto_id=False)
self.assertEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': u'Joe', 'is_cool': u'2'}, auto_id=False)
self.assertEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': u'Joe', 'is_cool': u'3'}, auto_id=False)
self.assertEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
p = Person({'name': u'Joe', 'is_cool': True}, auto_id=False)
self.assertEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': u'Joe', 'is_cool': False}, auto_id=False)
self.assertEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
def test_forms_with_file_fields(self):
        # FileFields are a special case because they take their data from
        # request.FILES, not request.POST.
class FileForm(Form):
file1 = FileField()
f = FileForm(auto_id=False)
self.assertEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
f = FileForm(data={}, files={}, auto_id=False)
self.assertEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="file" name="file1" /></td></tr>')
f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', '')}, auto_id=False)
self.assertEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>The submitted file is empty.</li></ul><input type="file" name="file1" /></td></tr>')
f = FileForm(data={}, files={'file1': 'something that is not a file'}, auto_id=False)
self.assertEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>No file was submitted. Check the encoding type on the form.</li></ul><input type="file" name="file1" /></td></tr>')
f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', 'some content')}, auto_id=False)
self.assertEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
self.assertTrue(f.is_valid())
f = FileForm(data={}, files={'file1': SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह')}, auto_id=False)
self.assertEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
def test_basic_processing_in_view(self):
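        # A form can drive the whole request/response cycle in a view: bind
        # the POST data, validate, and either report success or re-render the
        # form with its errors.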
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise ValidationError(u'Please make sure your passwords match.')
return self.cleaned_data
def my_function(method, post_data):
if method == 'POST':
form = UserRegistration(post_data, auto_id=False)
else:
form = UserRegistration(auto_id=False)
if form.is_valid():
return 'VALID: %r' % form.cleaned_data
t = Template('<form action="" method="post">\n<table>\n{{ form }}\n</table>\n<input type="submit" />\n</form>')
return t.render(Context({'form': form}))
        # Case 1: GET (an empty form, with no errors).
self.assertEqual(my_function('GET', {}), """<form action="" method="post">
<table>
<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>
</table>
<input type="submit" />
</form>""")
        # Case 2: POST with erroneous data (a redisplayed form, with errors).
self.assertEqual(my_function('POST', {'username': 'this-is-a-long-username', 'password1': 'foo', 'password2': 'bar'}), """<form action="" method="post">
<table>
<tr><td colspan="2"><ul class="errorlist"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><ul class="errorlist"><li>Ensure this value has at most 10 characters (it has 23).</li></ul><input type="text" name="username" value="this-is-a-long-username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>
</table>
<input type="submit" />
</form>""")
        # Case 3: POST with valid data (the success message).
self.assertEqual(my_function('POST', {'username': 'adrian', 'password1': 'secret', 'password2': 'secret'}), "VALID: {'username': u'adrian', 'password1': u'secret', 'password2': u'secret'}")
def test_templates_with_forms(self):
class UserRegistration(Form):
username = CharField(max_length=10, help_text="Good luck picking a username that doesn't already exist.")
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise ValidationError(u'Please make sure your passwords match.')
return self.cleaned_data
# You have full flexibility in displaying form fields in a template. Just pass a
# Form instance to the template, and use "dot" access to refer to individual
# fields. Note, however, that this flexibility comes with the responsibility of
# displaying all the errors, including any that might not be associated with a
# particular field.
t = Template('''<form action="">
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
self.assertEqual(t.render(Context({'form': UserRegistration({'username': 'django'}, auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<ul class="errorlist"><li>This field is required.</li></ul><p><label>Password: <input type="password" name="password1" /></label></p>
<ul class="errorlist"><li>This field is required.</li></ul><p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
# Use form.[field].label to output a field's label. You can specify the label for
# a field by using the 'label' argument to a Field class. If you don't specify
# 'label', Django will use the field name with underscores converted to spaces,
# and the initial letter capitalized.
t = Template('''<form action="">
<p><label>{{ form.username.label }}: {{ form.username }}</label></p>
<p><label>{{ form.password1.label }}: {{ form.password1 }}</label></p>
<p><label>{{ form.password2.label }}: {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p><label>Username: <input type="text" name="username" maxlength="10" /></label></p>
<p><label>Password1: <input type="password" name="password1" /></label></p>
<p><label>Password2: <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
        # Use form.[field].label_tag to output a field's label with a <label> tag
# wrapped around it, but *only* if the given field has an "id" attribute.
# Recall from above that passing the "auto_id" argument to a Form gives each
# field an "id" attribute.
t = Template('''<form action="">
<p>{{ form.username.label_tag }}: {{ form.username }}</p>
<p>{{ form.password1.label_tag }}: {{ form.password1 }}</p>
<p>{{ form.password2.label_tag }}: {{ form.password2 }}</p>
<input type="submit" />
</form>''')
self.assertEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p>Username: <input type="text" name="username" maxlength="10" /></p>
<p>Password1: <input type="password" name="password1" /></p>
<p>Password2: <input type="password" name="password2" /></p>
<input type="submit" />
</form>""")
self.assertEqual(t.render(Context({'form': UserRegistration(auto_id='id_%s')})), """<form action="">
<p><label for="id_username">Username</label>: <input id="id_username" type="text" name="username" maxlength="10" /></p>
<p><label for="id_password1">Password1</label>: <input type="password" name="password1" id="id_password1" /></p>
<p><label for="id_password2">Password2</label>: <input type="password" name="password2" id="id_password2" /></p>
<input type="submit" />
</form>""")
        # Use form.[field].help_text to output a field's help text. If the given field
# does not have help text, nothing will be output.
t = Template('''<form action="">
<p>{{ form.username.label_tag }}: {{ form.username }}<br />{{ form.username.help_text }}</p>
<p>{{ form.password1.label_tag }}: {{ form.password1 }}</p>
<p>{{ form.password2.label_tag }}: {{ form.password2 }}</p>
<input type="submit" />
</form>''')
self.assertEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p>Username: <input type="text" name="username" maxlength="10" /><br />Good luck picking a username that doesn't already exist.</p>
<p>Password1: <input type="password" name="password1" /></p>
<p>Password2: <input type="password" name="password2" /></p>
<input type="submit" />
</form>""")
self.assertEqual(Template('{{ form.password1.help_text }}').render(Context({'form': UserRegistration(auto_id=False)})), u'')
# The label_tag() method takes an optional attrs argument: a dictionary of HTML
# attributes to add to the <label> tag.
f = UserRegistration(auto_id='id_%s')
form_output = []
for bf in f:
form_output.append(bf.label_tag(attrs={'class': 'pretty'}))
self.assertEqual(form_output, [
'<label for="id_username" class="pretty">Username</label>',
'<label for="id_password1" class="pretty">Password1</label>',
'<label for="id_password2" class="pretty">Password2</label>',
])
# To display the errors that aren't associated with a particular field -- e.g.,
# the errors caused by Form.clean() -- use {{ form.non_field_errors }} in the
# template. If used on its own, it is displayed as a <ul> (or an empty string, if
# the list of errors is empty). You can also use it in {% if %} statements.
t = Template('''<form action="">
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertEqual(t.render(Context({'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
t = Template('''<form action="">
{{ form.non_field_errors }}
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertEqual(t.render(Context({'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)})), """<form action="">
<ul class="errorlist"><li>Please make sure your passwords match.</li></ul>
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
def test_empty_permitted(self):
# Sometimes (pretty much in formsets) we want to allow a form to pass validation
# if it is completely empty. We can accomplish this by using the empty_permitted
        # argument to a form constructor.
class SongForm(Form):
artist = CharField()
name = CharField()
        # First let's show what happens if empty_permitted=False (the default):
data = {'artist': '', 'song': ''}
form = SongForm(data, empty_permitted=False)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': [u'This field is required.'], 'artist': [u'This field is required.']})
try:
form.cleaned_data
self.fail('Attempts to access cleaned_data when validation fails should fail.')
except AttributeError:
pass
# Now let's show what happens when empty_permitted=True and the form is empty.
form = SongForm(data, empty_permitted=True)
self.assertTrue(form.is_valid())
self.assertEqual(form.errors, {})
self.assertEqual(form.cleaned_data, {})
# But if we fill in data for one of the fields, the form is no longer empty and
# the whole thing must pass validation.
data = {'artist': 'The Doors', 'song': ''}
form = SongForm(data, empty_permitted=False)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': [u'This field is required.']})
try:
form.cleaned_data
self.fail('Attempts to access cleaned_data when validation fails should fail.')
except AttributeError:
pass
        # If a field is not given in the data then None is returned for its data.
        # Let's make sure that, when checking for empty_permitted, None is
        # treated accordingly.
data = {'artist': None, 'song': ''}
form = SongForm(data, empty_permitted=True)
self.assertTrue(form.is_valid())
        # However, we *really* need to check specifically for None, since any
        # initial data that evaluates to False in a boolean context must still
        # be treated as meaningful data.
class PriceForm(Form):
amount = FloatField()
qty = IntegerField()
data = {'amount': '0.0', 'qty': ''}
form = PriceForm(data, initial={'amount': 0.0}, empty_permitted=True)
self.assertTrue(form.is_valid())
def test_extracting_hidden_and_visible(self):
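        # hidden_fields() and visible_fields() partition a form's bound fields
        # by whether their widget is hidden.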
class SongForm(Form):
token = CharField(widget=HiddenInput)
artist = CharField()
name = CharField()
form = SongForm()
self.assertEqual([f.name for f in form.hidden_fields()], ['token'])
self.assertEqual([f.name for f in form.visible_fields()], ['artist', 'name'])
def test_hidden_initial_gets_id(self):
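        # show_hidden_initial renders an extra hidden input carrying the
        # field's initial value; its name and id are prefixed with 'initial-'.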
class MyForm(Form):
field1 = CharField(max_length=50, show_hidden_initial=True)
self.assertEqual(MyForm().as_table(), '<tr><th><label for="id_field1">Field1:</label></th><td><input id="id_field1" type="text" name="field1" maxlength="50" /><input type="hidden" name="initial-field1" id="initial-id_field1" /></td></tr>')
def test_error_html_required_html_classes(self):
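        # Setting error_css_class and required_css_class on a form adds those
        # classes to the rows of fields with errors and of required fields.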
class Person(Form):
name = CharField()
is_cool = NullBooleanField()
email = EmailField(required=False)
age = IntegerField()
p = Person({})
p.error_css_class = 'error'
p.required_css_class = 'required'
self.assertEqual(p.as_ul(), """<li class="required error"><ul class="errorlist"><li>This field is required.</li></ul><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></li>
<li class="required"><label for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></li>
<li><label for="id_email">Email:</label> <input type="text" name="email" id="id_email" /></li>
<li class="required error"><ul class="errorlist"><li>This field is required.</li></ul><label for="id_age">Age:</label> <input type="text" name="age" id="id_age" /></li>""")
self.assertEqual(p.as_p(), """<ul class="errorlist"><li>This field is required.</li></ul>
<p class="required error"><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></p>
<p class="required"><label for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></p>
<p><label for="id_email">Email:</label> <input type="text" name="email" id="id_email" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p class="required error"><label for="id_age">Age:</label> <input type="text" name="age" id="id_age" /></p>""")
self.assertEqual(p.as_table(), """<tr class="required error"><th><label for="id_name">Name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="name" id="id_name" /></td></tr>
<tr class="required"><th><label for="id_is_cool">Is cool:</label></th><td><select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></td></tr>
<tr><th><label for="id_email">Email:</label></th><td><input type="text" name="email" id="id_email" /></td></tr>
<tr class="required error"><th><label for="id_age">Age:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="age" id="id_age" /></td></tr>""")
def test_label_split_datetime_not_displayed(self):
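        # No label is rendered for a field whose widgets are all hidden.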
class EventForm(Form):
happened_at = SplitDateTimeField(widget=widgets.SplitHiddenDateTimeWidget)
form = EventForm()
self.assertEqual(form.as_ul(), u'<input type="hidden" name="happened_at_0" id="id_happened_at_0" /><input type="hidden" name="happened_at_1" id="id_happened_at_1" />')
| {
"content_hash": "18a85a73bfbe37c7a93e2097114d55b3",
"timestamp": "",
"source": "github",
"line_count": 1776,
"max_line_length": 516,
"avg_line_length": 60.344031531531535,
"alnum_prop": 0.6224351736943763,
"repo_name": "mixman/djangodev",
"id": "4e9cc16f235f8a1bea38b58e21068d30e71ed755",
"size": "107295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/regressiontests/forms/tests/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "88362"
},
{
"name": "Python",
"bytes": "7834206"
},
{
"name": "Shell",
"bytes": "9076"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
from io import StringIO
from unittest import TestCase
from po_localization.parser import Parser, ParseError, parse_po_file, parse_po_filename
from po_localization.po_file import PoFile
class ParserTestCase(TestCase):
def _parse_and_expect(self, file_content, expected_catalog):
file_object = StringIO(file_content)
self.assertDictEqual(expected_catalog, parse_po_file(file_object))
def _parse_and_expect_failure(self, file_content):
file_content = StringIO(file_content)
self.assertRaises(ParseError, parse_po_file, file_content)
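    # The cases below exercise the PO constructs the parser understands:
    # msgctxt / msgid / msgid_plural / msgstr[N] entries, quoted-string
    # continuation lines, '#' comments, and the header entry (empty msgid).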
def test_empty_file(self):
self._parse_and_expect("", {})
def test_comment(self):
self._parse_and_expect("""
# Comment
""", {})
def test_empty_line(self):
self._parse_and_expect("""
""", {})
def test_simple(self):
self._parse_and_expect("""
msgid "Message to translate"
msgstr "Message translated"
""", {
"Message to translate": "Message translated"
})
def test_unescaping(self):
self._parse_and_expect(r"""
msgid "Line One\nLine Two"
msgstr "First Line\nSecond Line"
""", {
"Line One\nLine Two": "First Line\nSecond Line"
})
def test_broken_unescaping(self):
self._parse_and_expect_failure(r"""
msgid "Line One\"
"nLine Two"
msgstr "First Line\nSecond Line"
""")
def test_header_not_in_catalog(self):
self._parse_and_expect(r"""
msgid ""
msgstr ""
"Project-Id-Version: Django\n"
msgid "Message to translate"
msgstr "Message translated"
""", {
"Message to translate": "Message translated"
})
def test_context(self):
self._parse_and_expect("""
msgctxt "Context"
msgid "Message to translate"
msgstr "Translated message"
""", {
"Context\x04Message to translate": "Translated message"
})
def test_plural(self):
self._parse_and_expect("""
msgid "Message to translate"
msgid_plural "Messages to translate"
msgstr[0] "Translated message"
msgstr[1] "Translated messages"
""", {
("Message to translate", 0): "Translated message",
("Message to translate", 1): "Translated messages"
})
def test_context_and_plural(self):
self._parse_and_expect("""
msgctxt "Context"
msgid "Message to translate"
msgid_plural "Messages to translate"
msgstr[0] "Translated message"
msgstr[1] "Translated messages"
""",
{
("Context\x04Message to translate", 0): "Translated message",
("Context\x04Message to translate", 1): "Translated messages"
})
def test_unexpected_keywords(self):
self._parse_and_expect_failure("""
msgid "Message to translate"
msgctxt "Context"
msgstr "Translated message"
""")
self._parse_and_expect_failure("""
msgctxt "Context"
msgstr "Translated message"
msgid "Message to translate"
""")
self._parse_and_expect_failure("""
poney "Poney"
""")
self._parse_and_expect_failure("""
msgid "Message to translate"
msgid_plural "Messages to translate"
msgstr "Translated message"
""")
self._parse_and_expect_failure("""
msgid "Message to translate"
msgid_plural "Messages to translate"
msgstr[0] "Translated message"
msgstr "Translated messages"
""")
def test_duplicate_keywords(self):
self._parse_and_expect_failure("""
msgctxt "Context"
msgctxt "Context"
msgid "Message to translate"
msgstr "Translated message"
""")
self._parse_and_expect_failure("""
msgctxt "Context"
msgid "Message to translate"
msgid "Message to translate"
msgstr "Translated message"
""")
self._parse_and_expect_failure("""
msgctxt "Context"
msgid "Message to translate"
msgstr "Translated message"
msgstr "Translated message"
""")
def test_duplicate_plural_index(self):
self._parse_and_expect_failure("""
msgctxt "Context"
msgid "Message to translate"
msgid_plural "Messages to translate"
msgstr[0] "Translated message"
msgstr[0] "Translated message again"
""")
def test_early_termination(self):
self._parse_and_expect_failure("""
msgctxt "Context"
""")
self._parse_and_expect_failure("""
msgid "Message to translate"
""")
self._parse_and_expect_failure("""
msgid "Message to translate"
msgid_plural "Messages to translate"
""")
self._parse_and_expect_failure("""
msgid "Message to translate"
"... with continuation"
""")
self._parse_and_expect_failure("""
msgid "Message to translate"
# With comment
""")
self._parse_and_expect_failure("""
msgid "Message to translate"
""")
def test_missing_string(self):
self._parse_and_expect_failure("""
msgctxt
msgid "Message to translate"
msgstr "Translated message"
""")
self._parse_and_expect_failure("""
msgid
msgstr "Translated Message"
""")
self._parse_and_expect_failure("""
msgid "Message to translate"
msgstr
""")
self._parse_and_expect_failure("""
msgid "Message to translate"
msgid_plural
msgstr[0] "Translated message"
msgstr[1] "Translated messages"
""")
self._parse_and_expect_failure("""
msgid "Message to translate"
msgid_plural "Messages to translate"
msgstr[0]
msgstr[1] "Translated messages"
""")
def test_partial_string(self):
self._parse_and_expect_failure("""
msgctxt "Context
msgid "Message to translate"
msgstr "Translated message"
""")
self._parse_and_expect_failure("""
msgid "Message to translate
msgstr "Translated message"
""")
self._parse_and_expect_failure("""
msgid "Message to translate"
msgstr "Translated message
""")
self._parse_and_expect_failure("""
msgid "Message to translate"
msgid_plural "Messages to translate
msgstr[0] "Translated message"
msgstr[1] "Translated messages"
""")
self._parse_and_expect_failure("""
msgid "Message to translate"
msgid_plural "Messages to translate"
msgstr[0] "Translated message
msgstr[1] "Translated messages"
""")
def test_unexpected_index(self):
self._parse_and_expect_failure("""
msgctxt[0] "Context"
msgid "Message to translate"
msgstr "Translated message"
""")
self._parse_and_expect_failure("""
msgctxt "Context"
msgid[0] "Message to translate"
msgstr "Translated message"
""")
self._parse_and_expect_failure("""
msgctxt "Context"
msgid "Message to translate"
msgstr[0] "Translated message"
""")
self._parse_and_expect_failure("""
msgctxt "Context"
msgid "Message to translate"
msgid_plural[0] "Messages to translate"
msgstr[0] "Translated message"
msgstr[1] "Translated messages"
""")
def test_unexpected_continuation(self):
self._parse_and_expect_failure("""
"Continuation"
""")
self._parse_and_expect_failure("""
msgctxt "context"
"Continuation"
""")
def test_garbage_at_end(self):
self._parse_and_expect_failure("""
msgctxt "Context" GARBAGE
msgid "Message to translate"
msgstr "Translated message"
""")
self._parse_and_expect_failure("""
msgid "Message to translate" GARBAGE
msgstr "Translated message"
""")
self._parse_and_expect_failure("""
msgid "Message to translate"
msgstr "Translated message" GARBAGE
""")
self._parse_and_expect_failure("""
msgid "Message to translate"
msgid_plural "Messages to translate" GARBAGE
msgstr[0] "Translated message"
msgstr[1] "Translated messages"
""")
self._parse_and_expect_failure("""
msgid "Message to translate"
msgid_plural "Messages to translate"
msgstr[0] "Translated message" GARBAGE
msgstr[1] "Translated messages"
""")
def test_real_file(self):
filename = os.path.join(os.path.dirname(__file__), 'sample.po')
self.assertDictEqual(parse_po_filename(filename), {
("Context\x04Message to translate", 0): "Message à traduire",
("Context\x04Message to translate", 1): "Messages à traduire",
})
def test_header_parsing(self):
file_object = StringIO(r"""
msgid ""
msgstr ""
"Project-Id-Version: Django\n"
"Report-Msgid-Bugs-To: \n"
"Language-Team: French <None>\n"
"Language: fr\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n > 1)\n"
""")
po_file = PoFile()
self.assertListEqual(po_file.header_fields, [])
self.assertIsNone(po_file.get_nplurals())
parser = Parser(po_file)
parser.parse_po_file(file_object)
self.assertListEqual(po_file.header_fields, [
('Project-Id-Version', 'Django'),
('Report-Msgid-Bugs-To', ''),
('Language-Team', 'French <None>'),
('Language', 'fr'),
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset=UTF-8'),
('Content-Transfer-Encoding', '8bit'),
('Plural-Forms', 'nplurals=2; plural=(n > 1)')])
self.assertEqual(po_file.get_nplurals(), 2)
def test_exception_message(self):
try:
raise ParseError("filename.po", 42, "the error message")
except ParseError as error:
self.assertEqual("filename.po:42: the error message", "{}".format(error))
try:
raise ParseError("filename.po", None, "unexpected end of file")
except ParseError as error:
self.assertEqual("filename.po: unexpected end of file", "{}".format(error))
try:
raise ParseError(None, 42, "the error message")
except ParseError as error:
self.assertEqual("line 42: the error message", "{}".format(error))
try:
raise ParseError(None, None, "the error message")
except ParseError as error:
self.assertEqual("the error message", "{}".format(error))
| {
"content_hash": "de734354e095fc17839fb9e751c1ed33",
"timestamp": "",
"source": "github",
"line_count": 350,
"max_line_length": 87,
"avg_line_length": 27.94,
"alnum_prop": 0.6540546068105123,
"repo_name": "kmichel/po-localization",
"id": "fb57aed2dd7637abf150b857e184ef1932c66cd5",
"size": "9797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "po_localization/tests/test_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91595"
}
],
"symlink_target": ""
} |
"""
Scenariocompare: compare two scenarios, concentrating on csv files in the outputs/data subfolders
"""
import os
import pathlib
import sys
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
def main(s1, s2):
print(f"FILE, FIELD, RMSE, ERROR")
after_root = pathlib.Path(s2)
for before in pathlib.Path(s1).rglob("*.csv"):
if r"building-properties\schedules" in str(before):
# exclude these as they're not really .csv files
continue
if r"technology\archetypes\use_types" in str(before):
# exclude these as they're not really .csv files
continue
rel_path = before.relative_to(s1)
try:
before_df = pd.read_csv(before)
except pd.errors.EmptyDataError:
# No columns to parse from file
continue
diff_df = before_df.copy()
after = after_root.joinpath(rel_path)
after_df = pd.read_csv(after) if after.exists() else None
# save after_df back to .csv with same ordering of fields (makes it easier to debug the files manually)
if after_df is not None:
after_df.to_csv(after, columns=before_df.columns, index=False)
float_fields = [f for f in before_df.dtypes.index if before_df.dtypes[f] == "float64"]
for f in float_fields:
            if after_df is None or f not in after_df:
error = "left only"
rmse = np.nan
diff_df[f] = np.nan
else:
try:
error = "ok"
                    # report the RMSE (square root of sklearn's MSE), matching
                    # the output header
                    rmse = np.sqrt(mean_squared_error(after_df[f], before_df[f]))
diff_df[f] = round(before_df[f] - after_df[f], 5)
except Exception as e:
error = e
rmse = np.nan
print(f"{rel_path}, {f}, {rmse:.10f}, {str(error).replace(',', '_')}")
try:
diff_df.to_csv(f"{after}-diff.csv", columns=before_df.columns, index=False)
except FileNotFoundError:
# just ignore this - folder might not exist if the after file was not written
pass
if __name__ == "__main__":
s1, s2 = map(os.path.abspath, sys.argv[1:]) # note: this will fail if not used correctly...
main(s1, s2)
| {
"content_hash": "1dbf3e5ba778a06672115545849e8251",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 111,
"avg_line_length": 33.94117647058823,
"alnum_prop": 0.5680242634315424,
"repo_name": "architecture-building-systems/CEAforArcGIS",
"id": "06dca4dfc7356cf767f544158b66b298d47fee3c",
"size": "2308",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bin/scenariocompare.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2776"
},
{
"name": "Jupyter Notebook",
"bytes": "135743"
},
{
"name": "Makefile",
"bytes": "675"
},
{
"name": "NSIS",
"bytes": "3948"
},
{
"name": "Python",
"bytes": "1217645"
},
{
"name": "Shell",
"bytes": "7194"
}
],
"symlink_target": ""
} |
import os
import shutil
import json
from pelican import generators
from pelican import signals
import datastore
class AssetGenerator(generators.Generator):
"""Generates downloadable assets from a Pelican Datastore."""
def __init__(self, *args, **kwargs):
super(AssetGenerator, self).__init__(*args, **kwargs)
self.datastore_path = self.settings['DATASTORE']['location']
self.dest_path = os.path.join(self.settings['OUTPUT_PATH'],
self.settings['THEME_STATIC_DIR'],
self.settings['DATASTORE']['assets']['location'])
self.archive_format = 'gztar'
self.timestamp = self.settings['TIMESTAMP']
# self.assets_exclude = self.settings['DATASTORE']['assets']['exclude']
def write_archive(self):
"""Write an archive of the data as a public asset."""
name = '{0}{1}'.format('opendataindex_data_', self.timestamp)
archive_name = os.path.join(self.dest_path, name)
shutil.make_archive(archive_name, self.archive_format,
self.datastore_path)
def generate_output(self, writer):
"""Write the assets based on configuration."""
self.write_archive()
# TODO: Other public asset generators
def get_generators(pelican_object):
return AssetGenerator
def register():
signals.get_generators.connect(get_generators)
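# To activate the plugin, list it in the Pelican configuration (illustrative):
#   PLUGIN_PATHS = ['plugins']
#   PLUGINS = ['datastore_assets']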
| {
"content_hash": "51b88d5fa720df52f0991768d0a75eda",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 87,
"avg_line_length": 33.348837209302324,
"alnum_prop": 0.6290097629009763,
"repo_name": "cybergreen-net/stats",
"id": "924cc1931b0b6ae9e77a6eebc92a2f78a30a5582",
"size": "1434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/datastore_assets/datastore_assets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "276490"
},
{
"name": "HTML",
"bytes": "40947"
},
{
"name": "JavaScript",
"bytes": "37572"
},
{
"name": "Python",
"bytes": "24332"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/dungeon/shared_trash_container.iff"
result.attribute_template_id = -1
result.stfName("obj_n","trash_container")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "ca40a97dcc343659994a5109663e37bf",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 71,
"avg_line_length": 23.307692307692307,
"alnum_prop": 0.6963696369636964,
"repo_name": "anhstudios/swganh",
"id": "705896a1a57f44211490da2e5060c2a525fdb735",
"size": "448",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/dungeon/shared_trash_container.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |