repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses 15 values) |
---|---|---|---|---|---|
ewheeler/rapidpro | temba/values/tests.py | 1 | 20667 | from __future__ import unicode_literals
import json
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.utils import timezone
from mock import patch
from temba.contacts.models import ContactField
from temba.flows.models import RuleSet
from temba.orgs.models import Language
from temba.tests import FlowFileTest
from .models import Value
class ResultTest(FlowFileTest):
def assertResult(self, result, index, category, count):
self.assertEquals(count, result['categories'][index]['count'])
self.assertEquals(category, result['categories'][index]['label'])
def test_field_results(self):
c1 = self.create_contact("Contact1", '0788111111')
c2 = self.create_contact("Contact2", '0788222222')
c3 = self.create_contact("Contact3", '0788333333')
self.create_contact("Contact4", '0788444444')
# create a gender field that uses strings
gender = ContactField.get_or_create(self.org, self.admin, 'gender', label="Gender", value_type=Value.TYPE_TEXT)
c1.set_field(self.user, 'gender', "Male")
c2.set_field(self.user, 'gender', "Female")
c3.set_field(self.user, 'gender', "Female")
result = Value.get_value_summary(contact_field=gender)[0]
self.assertEquals(2, len(result['categories']))
self.assertEquals(3, result['set'])
self.assertEquals(2, result['unset']) # this is two as we have the default contact created by our unit tests
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "Female", 2)
self.assertResult(result, 1, "Male", 1)
# create a born field that uses decimals
born = ContactField.get_or_create(self.org, self.admin, 'born', label="Born", value_type=Value.TYPE_DECIMAL)
c1.set_field(self.user, 'born', 1977)
c2.set_field(self.user, 'born', 1990)
c3.set_field(self.user, 'born', 1977)
result = Value.get_value_summary(contact_field=born)[0]
self.assertEquals(2, len(result['categories']))
self.assertEquals(3, result['set'])
self.assertEquals(2, result['unset'])
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "1977", 2)
self.assertResult(result, 1, "1990", 1)
# ok, state field!
state = ContactField.get_or_create(self.org, self.admin, 'state', label="State", value_type=Value.TYPE_STATE)
c1.set_field(self.user, 'state', "Kigali City")
c2.set_field(self.user, 'state', "Kigali City")
result = Value.get_value_summary(contact_field=state)[0]
self.assertEquals(1, len(result['categories']))
self.assertEquals(2, result['set'])
self.assertEquals(3, result['unset'])
self.assertResult(result, 0, "1708283", 2)
reg_date = ContactField.get_or_create(self.org, self.admin, 'reg_date', label="Registration Date", value_type=Value.TYPE_DATETIME)
now = timezone.now()
c1.set_field(self.user, 'reg_date', now.replace(hour=9))
c2.set_field(self.user, 'reg_date', now.replace(hour=4))
c3.set_field(self.user, 'reg_date', now - timedelta(days=1))
result = Value.get_value_summary(contact_field=reg_date)[0]
self.assertEquals(2, len(result['categories']))
self.assertEquals(3, result['set'])
self.assertEquals(2, result['unset'])
self.assertResult(result, 0, now.replace(hour=0, minute=0, second=0, microsecond=0), 2)
self.assertResult(result, 1, (now - timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0), 1)
# make sure categories returned are sorted by count, not name
c2.set_field(self.user, 'gender', "Male")
result = Value.get_value_summary(contact_field=gender)[0]
self.assertEquals(2, len(result['categories']))
self.assertEquals(3, result['set'])
self.assertEquals(2, result['unset']) # this is two as we have the default contact created by our unit tests
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "Male", 2)
self.assertResult(result, 1, "Female", 1)
# check the modified date is tracked for fields
original_value = Value.objects.get(contact=c1, contact_field=gender)
c1.set_field(self.user, 'gender', 'unknown')
new_value = Value.objects.get(contact=c1, contact_field=gender)
self.assertTrue(new_value.modified_on > original_value.modified_on)
self.assertNotEqual(new_value.string_value, original_value.string_value)
def run_color_gender_flow(self, contact, color, gender, age):
self.assertEqual(self.send_message(self.flow, color, contact=contact, restart_participants=True), "What is your gender?")
self.assertEqual(self.send_message(self.flow, gender, contact=contact), "What is your age?")
self.assertEqual(self.send_message(self.flow, age, contact=contact), "Thanks.")
def setup_color_gender_flow(self):
self.flow = self.get_flow('color_gender_age')
(self.c1, self.c2, self.c3, self.c4) = (self.create_contact("Contact1", '0788111111'),
self.create_contact("Contact2", '0788222222'),
self.create_contact("Contact3", '0788333333'),
self.create_contact("Contact4", '0788444444'))
def test_category_results(self):
self.setup_color_gender_flow()
# create a state field:
# assign c1 and c2 to Kigali
ContactField.get_or_create(self.org, self.admin, 'state', label="State", value_type=Value.TYPE_STATE)
ContactField.get_or_create(self.org, self.admin, 'district', label="District", value_type=Value.TYPE_DISTRICT)
self.c1.set_field(self.user, 'state', "Kigali City")
self.c1.set_field(self.user, 'district', "Nyarugenge")
self.c2.set_field(self.user, 'state', "Kigali City")
self.c2.set_field(self.user, 'district', "Nyarugenge")
self.run_color_gender_flow(self.c1, "red", "male", "16")
self.run_color_gender_flow(self.c2, "blue", "female", "19")
self.run_color_gender_flow(self.c3, "green", "male", "75")
self.run_color_gender_flow(self.c4, "maroon", "female", "50")
# create a group of the women
ladies = self.create_group("Ladies", [self.c2, self.c4])
# get our rulesets
color = RuleSet.objects.get(flow=self.flow, label="Color")
gender = RuleSet.objects.get(flow=self.flow, label="Gender")
age = RuleSet.objects.get(flow=self.flow, label="Age")
# fetch our results through the view
self.login(self.admin)
response = self.client.get(reverse('flows.ruleset_results', args=[color.pk]))
response = json.loads(response.content)
categories = response['results'][0]['categories']
self.assertEqual('Red', categories[0]['label'])
self.assertEqual('Blue', categories[1]['label'])
self.assertEqual('Green', categories[2]['label'])
self.assertEqual(2, categories[0]['count'])
self.assertEqual(1, categories[1]['count'])
self.assertEqual(1, categories[2]['count'])
# categories should be in the same order as our rules, should have correct counts
result = Value.get_value_summary(ruleset=color)[0]
self.assertEquals(3, len(result['categories']))
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "Red", 2)
self.assertResult(result, 1, "Blue", 1)
self.assertResult(result, 2, "Green", 1)
# check our age category as well
result = Value.get_value_summary(ruleset=age)[0]
self.assertEquals(3, len(result['categories']))
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "Child", 1)
self.assertResult(result, 1, "Adult", 2)
self.assertResult(result, 2, "Senior", 1)
# and our gender categories
result = Value.get_value_summary(ruleset=gender)[0]
self.assertEquals(2, len(result['categories']))
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "Male", 2)
self.assertResult(result, 1, "Female", 2)
# now filter the results and only get responses by men
result = Value.get_value_summary(ruleset=color, filters=[dict(ruleset=gender.pk, categories=["Male"])])[0]
self.assertResult(result, 0, "Red", 1)
self.assertResult(result, 1, "Blue", 0)
self.assertResult(result, 2, "Green", 1)
# what about men that are adults?
result = Value.get_value_summary(ruleset=color, filters=[dict(ruleset=gender.pk, categories=["Male"]),
dict(ruleset=age.pk, categories=["Adult"])])[0]
self.assertResult(result, 0, "Red", 0)
self.assertResult(result, 1, "Blue", 0)
self.assertResult(result, 2, "Green", 0)
# union of all genders
result = Value.get_value_summary(ruleset=color, filters=[dict(ruleset=gender.pk, categories=["Male", "Female"]),
dict(ruleset=age.pk, categories=["Adult"])])[0]
self.assertResult(result, 0, "Red", 1)
self.assertResult(result, 1, "Blue", 1)
self.assertResult(result, 2, "Green", 0)
# just women adults by group
result = Value.get_value_summary(ruleset=color, filters=[dict(groups=[ladies.pk]), dict(ruleset=age.pk, categories="Adult")])[0]
self.assertResult(result, 0, "Red", 1)
self.assertResult(result, 1, "Blue", 1)
self.assertResult(result, 2, "Green", 0)
# remove one of the women from the group
ladies.update_contacts(self.user, [self.c2], False)
# get a new summary
result = Value.get_value_summary(ruleset=color, filters=[dict(groups=[ladies.pk]), dict(ruleset=age.pk, categories="Adult")])[0]
self.assertResult(result, 0, "Red", 1)
self.assertResult(result, 1, "Blue", 0)
self.assertResult(result, 2, "Green", 0)
# ok, back in she goes
ladies.update_contacts(self.user, [self.c2], True)
# do another run for contact 1
self.run_color_gender_flow(self.c1, "blue", "male", "16")
# totals should reflect the new value, not the old
result = Value.get_value_summary(ruleset=color)[0]
self.assertResult(result, 0, "Red", 1)
self.assertResult(result, 1, "Blue", 2)
self.assertResult(result, 2, "Green", 1)
# what if we do a partial run?
self.send_message(self.flow, "red", contact=self.c1, restart_participants=True)
# should change our male/female breakdown since c1 no longer has a gender
result = Value.get_value_summary(ruleset=gender)[0]
self.assertEquals(2, len(result['categories']))
self.assertResult(result, 0, "Male", 1)
self.assertResult(result, 1, "Female", 2)
# back to a full flow
self.run_color_gender_flow(self.c1, "blue", "male", "16")
# ok, now segment by gender
result = Value.get_value_summary(ruleset=color, filters=[], segment=dict(ruleset=gender.pk, categories=["Male", "Female"]))
male_result = result[0]
self.assertResult(male_result, 0, "Red", 0)
self.assertResult(male_result, 1, "Blue", 1)
self.assertResult(male_result, 2, "Green", 1)
female_result = result[1]
self.assertResult(female_result, 0, "Red", 1)
self.assertResult(female_result, 1, "Blue", 1)
self.assertResult(female_result, 2, "Green", 0)
# segment by gender again, but use the contact field to do so
result = Value.get_value_summary(ruleset=color, filters=[], segment=dict(contact_field="Gender", values=["MALE", "Female"]))
male_result = result[0]
self.assertResult(male_result, 0, "Red", 0)
self.assertResult(male_result, 1, "Blue", 1)
self.assertResult(male_result, 2, "Green", 1)
female_result = result[1]
self.assertResult(female_result, 0, "Red", 1)
self.assertResult(female_result, 1, "Blue", 1)
self.assertResult(female_result, 2, "Green", 0)
# add in a filter at the same time
result = Value.get_value_summary(ruleset=color, filters=[dict(ruleset=color.pk, categories=["Blue"])],
segment=dict(ruleset=gender.pk, categories=["Male", "Female"]))
male_result = result[0]
self.assertResult(male_result, 0, "Red", 0)
self.assertResult(male_result, 1, "Blue", 1)
self.assertResult(male_result, 2, "Green", 0)
female_result = result[1]
self.assertResult(female_result, 0, "Red", 0)
self.assertResult(female_result, 1, "Blue", 1)
self.assertResult(female_result, 2, "Green", 0)
# ok, try segmenting by location instead
result = Value.get_value_summary(ruleset=color, segment=dict(location="State"))
eastern_result = result[0]
self.assertEquals('171591', eastern_result['boundary'])
self.assertEquals('Eastern Province', eastern_result['label'])
self.assertResult(eastern_result, 0, "Red", 0)
self.assertResult(eastern_result, 1, "Blue", 0)
self.assertResult(eastern_result, 2, "Green", 0)
kigali_result = result[1]
self.assertEquals('1708283', kigali_result['boundary'])
self.assertEquals('Kigali City', kigali_result['label'])
self.assertResult(kigali_result, 0, "Red", 0)
self.assertResult(kigali_result, 1, "Blue", 2)
self.assertResult(kigali_result, 2, "Green", 0)
# updating state location leads to updated data
self.c2.set_field(self.user, 'state', "Eastern Province")
result = Value.get_value_summary(ruleset=color, segment=dict(location="State"))
eastern_result = result[0]
self.assertEquals('171591', eastern_result['boundary'])
self.assertEquals('Eastern Province', eastern_result['label'])
self.assertResult(eastern_result, 0, "Red", 0)
self.assertResult(eastern_result, 1, "Blue", 1)
self.assertResult(eastern_result, 2, "Green", 0)
kigali_result = result[1]
self.assertEquals('1708283', kigali_result['boundary'])
self.assertEquals('Kigali City', kigali_result['label'])
self.assertResult(kigali_result, 0, "Red", 0)
self.assertResult(kigali_result, 1, "Blue", 1)
self.assertResult(kigali_result, 2, "Green", 0)
# segment by district instead
result = Value.get_value_summary(ruleset=color, segment=dict(parent="1708283", location="District"))
# only one district in kigali
self.assertEquals(1, len(result))
kigali_result = result[0]
self.assertEquals('3963734', kigali_result['boundary'])
self.assertEquals('Nyarugenge', kigali_result['label'])
self.assertResult(kigali_result, 0, "Red", 0)
self.assertResult(kigali_result, 1, "Blue", 2)
self.assertResult(kigali_result, 2, "Green", 0)
# do a sanity check on our choropleth view
self.login(self.admin)
response = self.client.get(reverse('flows.ruleset_choropleth', args=[color.pk]) +
"?_format=json&boundary=" + self.org.country.osm_id)
# response should be valid json
response = json.loads(response.content)
# should have breaks
self.assertTrue('breaks' in response)
# should have two categories, Blue and Others
self.assertEquals(2, len(response['categories']))
self.assertEquals("Blue", response['categories'][0])
self.assertEquals("Others", response['categories'][1])
# assert our kigali result
kigali_result = response['scores']['1708283']
self.assertEquals(1, kigali_result['score'])
self.assertEquals("Kigali City", kigali_result['name'])
self.assertEquals("Blue", kigali_result['results'][0]['label'])
self.assertEquals("Others", kigali_result['results'][1]['label'])
self.assertEquals(1, kigali_result['results'][0]['count'])
self.assertEquals(0, kigali_result['results'][1]['count'])
self.assertEquals(100, kigali_result['results'][0]['percentage'])
self.assertEquals(0, kigali_result['results'][1]['percentage'])
with patch('temba.values.models.Value.get_value_summary') as mock:
mock.return_value = []
response = self.client.get(reverse('flows.ruleset_choropleth', args=[color.pk]) +
"?_format=json&boundary=" + self.org.country.osm_id)
# response should be valid json
response = json.loads(response.content)
# should have two categories, Blue and Others
self.assertEquals(2, len(response['categories']))
self.assertEquals("", response['categories'][0])
self.assertEquals("", response['categories'][1])
# all counts and percentage are 0
self.assertEquals(0, response['totals']['count'])
self.assertEquals(0, response['totals']['results'][0]['count'])
self.assertEquals(0, response['totals']['results'][0]['percentage'])
self.assertEquals(0, response['totals']['results'][1]['count'])
self.assertEquals(0, response['totals']['results'][1]['percentage'])
# and empty string labels
self.assertEquals("", response['totals']['results'][0]['label'])
self.assertEquals("", response['totals']['results'][1]['label'])
# also check our analytics view
response = self.client.get(reverse('flows.ruleset_analytics'))
# make sure we have only one flow in it
flows = json.loads(response.context['flows'])
self.assertEquals(1, len(flows))
self.assertEquals(3, len(flows[0]['rules']))
def test_open_ended_word_frequencies(self):
flow = self.get_flow('random_word')
def run_flow(contact, word):
self.assertEquals("Thank you", self.send_message(flow, word, contact=contact, restart_participants=True))
(c1, c2, c3, c4, c5, c6) = (self.create_contact("Contact1", '0788111111'),
self.create_contact("Contact2", '0788222222'),
self.create_contact("Contact3", '0788333333'),
self.create_contact("Contact4", '0788444444'),
self.create_contact("Contact5", '0788555555'),
self.create_contact("Contact6", '0788666666', is_test=True))
run_flow(c1, "1 better place")
run_flow(c2, "the great coffee")
run_flow(c3, "1 cup of black tea")
run_flow(c4, "awesome than this encore")
run_flow(c5, "from an awesome place in kigali")
run_flow(c6, "awesome coffee")
random = RuleSet.objects.get(flow=flow, label="Random")
result = Value.get_value_summary(ruleset=random)[0]
self.assertEquals(10, len(result['categories']))
self.assertTrue(result['open_ended'])
self.assertResult(result, 0, "awesome", 2)
self.assertResult(result, 1, "place", 2)
self.assertResult(result, 2, "better", 1)
self.assertResult(result, 3, "black", 1)
self.assertResult(result, 4, "coffee", 1)
self.assertResult(result, 5, "cup", 1)
self.assertResult(result, 6, "encore", 1)
self.assertResult(result, 7, "great", 1)
self.assertResult(result, 8, "kigali", 1)
self.assertResult(result, 9, "tea", 1)
# add French to org languages
Language.create(self.org, self.admin, 'French', 'fre')
# make sure we cleared the cache
Value.invalidate_cache(ruleset=random)
# encore is a french stop word and should not be included this time
result = Value.get_value_summary(ruleset=random)[0]
self.assertEquals(9, len(result['categories']))
self.assertTrue(result['open_ended'])
self.assertResult(result, 0, "awesome", 2)
self.assertResult(result, 1, "place", 2)
self.assertResult(result, 2, "better", 1)
self.assertResult(result, 3, "black", 1)
self.assertResult(result, 4, "coffee", 1)
self.assertResult(result, 5, "cup", 1)
self.assertResult(result, 6, "great", 1)
self.assertResult(result, 7, "kigali", 1)
self.assertResult(result, 8, "tea", 1)
| agpl-3.0 |
rdelval/aurora | src/test/python/apache/thermos/cli/commands/test_simplerun.py | 13 | 1425 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import getpass
import mock
from apache.thermos.cli.commands.simplerun import simplerun
@mock.patch('apache.thermos.cli.commands.simplerun.really_run')
def test_simplerun(really_run_mock):
options_mock = mock.Mock(
spec_set=('root', 'user', 'name', 'task_id', 'prebound_ports', 'bindings', 'daemon'))
options_mock.root = '/tmp/root'
options_mock.user = getpass.getuser()
options_mock.name = 'simple'
options_mock.task_id = None
options_mock.prebound_ports = []
options_mock.bindings = {}
options_mock.daemon = False
simplerun(['--', 'echo', 'hello', 'world'], options_mock)
args, kw = really_run_mock.call_args
thermos_task, root, sandbox = args
assert str(thermos_task.task.name()) == options_mock.name
assert str(thermos_task.task.processes()[0].cmdline()) == 'echo hello world'
assert root == '/tmp/root'
assert sandbox is not None
| apache-2.0 |
daskos/mentos | mentos/utils.py | 2 | 4957 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
from binascii import a2b_base64, b2a_base64
from contextlib import contextmanager
from multiprocessing.pool import ThreadPool
from mentos.exceptions import (DetectorClosed, NoLeadingMaster,
NoRedirectException)
from tornado import gen, ioloop
from tornado.escape import json_decode, json_encode
from zoonado import Zoonado
log = logging.getLogger(__name__)
decode = json_decode
encode = json_encode
def encode_data(data):
return b2a_base64(data).strip().decode('ascii')
def decode_data(data):
return a2b_base64(data)
_workers = ThreadPool(10)
def run_background(func, callback, args=(), kwds={}):
def _callback(result):
ioloop.IOLoop.instance().add_callback(lambda: callback(result))
_workers.apply_async(func, args, kwds, _callback)
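# A minimal usage sketch for run_background(), assuming a hypothetical blocking
# 'slow_lookup' helper: the call is pushed onto the thread pool and its result
# is marshalled back onto the tornado IOLoop via the callback.
def _example_run_background():
    def slow_lookup():
        return 'leader.mesos:5050'
    run_background(slow_lookup, lambda result: log.debug('resolved %s', result))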
@contextmanager
def log_errors(pdb=False): # pragma: no cover
try:
yield
except (gen.Return):
raise
except Exception as e:
log.exception(e)
if pdb:
import pdb
pdb.set_trace()
raise
POSTFIX = {
'ns': 1e-9,
'us': 1e-6,
'ms': 1e-3,
'secs': 1,
'mins': 60,
'hrs': 60 * 60,
'days': 24 * 60 * 60,
'weeks': 7 * 24 * 60 * 60
}
def parse_duration(s):
s = s.strip()
unit = None
postfix = None
for postfix, unit in POSTFIX.items():
if s.endswith(postfix):
try:
return float(s[:-len(postfix)]) * unit
except ValueError: # pragma: no cover
continue
raise Exception('Unknown duration `{}`; supported units are {}'.format(
s, ','.join('`{}`'.format(n) for n in POSTFIX)))
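# A minimal usage sketch for parse_duration(); the sample durations below are
# illustrative assumptions, not values used elsewhere in this module.
def _example_parse_duration():
    assert parse_duration('250ms') == 0.25        # milliseconds -> seconds
    assert parse_duration('2 mins') == 120.0      # minutes -> seconds
    assert parse_duration('1 weeks') == 604800.0  # weeks -> seconds
    # unknown units raise the Exception above, e.g. parse_duration('5 fortnights')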
class MasterInfo(object):
detector = None
def __init__(self, uri):
self.uri = uri
self.seq = None
self.info = {'address': {}}
self.closing = False
if 'zk://' in uri:
log.warn('Using Zookeeper for Discovery')
self.quorum = ','.join([zoo[zoo.index('://') + 3:]
for zoo in self.uri.split(',')])
self.detector = Zoonado(self.quorum, session_timeout=6000)
ioloop.IOLoop.current().add_callback(self.detector.start)
self.current_location = None
def redirected_uri(self, uri):
if not self.detector:
self.uri = uri
else:
raise NoRedirectException(
'Using Zookeeper, cannot set a redirect url')
@gen.coroutine
def get_endpoint(self, path=''):
if self.closing:
raise DetectorClosed('Detector is closed')
if self.detector:
children = yield self.detector.get_children('/mesos')
children = [child for child in children if child != 'log_replicas']
if not children: # pragma: no cover
log.error('No leading Master found in zookeeper')
raise NoLeadingMaster('No leading Master found in zookeeper')
self.seq = min(children)
data = yield self.detector.get_data('/mesos/' + self.seq)
self.info = decode(data)
else:
host_port = self.uri.split(':')
log.debug(host_port)
if len(host_port) == 2:
self.info['address']['hostname'] = host_port[0]
self.info['address']['port'] = int(host_port[1])
else:
self.info['address']['hostname'] = host_port[0]
self.info['address']['port'] = 5050
log.debug('Found new Master, info={info}'.format(info=self.info))
if 'hostname' in self.info['address']:
host = self.info['address']['hostname']
elif 'ip' in self.info['address']: # pragma: no cover
host = self.info['address']['ip']
port = self.info['address']['port']
self.current_location = '{host}:{port}'.format(host=host, port=port)
raise gen.Return('http://{current_location}{path}'.format(
current_location=self.current_location, path=path))
def close(self):
if self.closing:
return
self.closing = True
def on_complete(self):
log.debug('Closed detector')
run_background(self.detector.close, on_complete)
def drain(iterable):
'''Helper method that empties an iterable as it is iterated over.
Works for: dict, collections.deque, list, set
'''
if getattr(iterable, 'popleft', False):
def next_item(coll):
return coll.popleft()
elif getattr(iterable, 'popitem', False):
def next_item(coll):
return coll.popitem()
else:
def next_item(coll):
return coll.pop()
while True:
try:
yield next_item(iterable)
except (IndexError, KeyError):
raise StopIteration
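# A minimal usage sketch for drain(), assuming a pre-PEP 479 interpreter (the
# generator above signals exhaustion by raising StopIteration, which Python
# 3.7+ converts to RuntimeError). The sample deque contents are illustrative.
def _example_drain():
    from collections import deque
    pending = deque(['TASK_RUNNING', 'TASK_FINISHED'])
    seen = list(drain(pending))           # yields items in FIFO order
    assert seen == ['TASK_RUNNING', 'TASK_FINISHED']
    assert len(pending) == 0              # the deque has been emptied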
| apache-2.0 |
mwrlabs/veripy | contrib/rfc3736/__init__.py | 1 | 6086 | from veripy.models import ComplianceTestSuite
from veripy.models.decorators import must, should
import client
#import relay_agent
import server
class StatelessDHCPv6ServiceClientSpecification(ComplianceTestSuite):
"""
Stateless Dynamic Host Configuration Protocol Service for IPv6 (DHCPv6 Client)
These tests are designed to verify the readiness of a DHCPv6 client implementation
vis-a-vis the Stateless Dynamic Host Configuration Protocol for IPv6 specification.
@private
Author: MWR
Source: IPv6 Ready DHCPv6 Interoperability Test Suite (Section 7)
"""
TestCase001 = client.basic_message_exchanges.BasicMessageExchangeTestCase
TestCase002 = client.implementation_of_dhcp_constants.MulticastAddressesTestCase
TestCase003 = client.implementation_of_dhcp_constants.ValidUDPPortTestCase
TestCase004 = client.implementation_of_dhcp_constants.InvalidUDPPortTestCase
TestCase005 = client.client_message_format.ClientMessageFormatTestCase
TestCase006 = client.option_request_option_format.DNSRecursiveNameServerOptionTestCase
TestCase007 = client.option_request_option_format.DomainSearchListOptionTestCase
TestCase008 = client.transmission_of_information_request_messages.ReliabilityOfDHCPv6RetransmissionTestCase
TestCase011 = client.reception_of_reply_messages_for_dns_configuration_options.DNSRecursiveNameServerOptionTestCase
TestCase012 = client.reception_of_reply_messages_for_dns_configuration_options.DomainSearchListOptionTestCase
TestCase013 = client.reception_of_invalid_reply_message.NoServerIdentifierOptionTestCase
TestCase014 = client.reception_of_invalid_reply_message.TransactionIDMismatchTestCase
#TestCase015 = client.client_message_validation.SolicitMessageTestCase
#TestCase016 = client.client_message_validation.RequestMessageTestCase
#TestCase017 = client.client_message_validation.ConfirmMessageTestCase
#TestCase018 = client.client_message_validation.RenewMessageTestCase
#TestCase019 = client.client_message_validation.RebindMessageTestCase
#TestCase020 = client.client_message_validation.DeclineMessageTestCase
#TestCase021 = client.client_message_validation.ReleaseMessageTestCase
#TestCase022 = client.client_message_validation.InformationRequestMessageTestCase
#TestCase023 = client.client_message_validation.RelayForwardMessageTestCase
#TestCase024 = client.client_message_validation.RelayReplyMessageTestCase
TestCase025 = client.client_dhcp_unique_identifier.DUIDFormatTestCase
class StatelessDHCPv6ServiceServerSpecification(ComplianceTestSuite):
"""
Stateless Dynamic Host Configuration Protocol Service for IPv6 (DHCPv6 Server)
These tests are designed to verify the readiness of a DHCPv6 server implementation
vis-a-vis the Stateless Dynamic Host Configuration Protocol for IPv6 specification.
@private
Author: MWR
Source: IPv6 Ready DHCPv6 Interoperability Test Suite (Section 8)
"""
TestCase001 = server.basic_message_exchanges.BasicMessageExchangeTestCase
TestCase002 = server.transaction_id_consistency.TransactionIDConsistencyTestCase
TestCase003 = server.implementation_of_dhcp_constants.ValidUDPPortTestCase
TestCase004 = server.implementation_of_dhcp_constants.InvalidUDPPortTestCase
TestCase005 = server.server_message_format.ClientServerMessageFormatTestCase
TestCase006 = server.server_message_format.RelayAgentServerMessageFormatTestCase
TestCase007 = server.server_identifier_option_format.ServerIdentifierOptionFormatTestCase
TestCase008 = server.dhcp_unique_identifier_contents.DHCPUniqueIdentifierContentsTestCase
TestCase009 = server.dns_recursive_name_server_option_format.DNSRecursiveNameServerOptionFormatTestCase
TestCase010 = server.domain_search_list_option_format.DomainSearchListOptionFormatTestCase
TestCase011 = server.interface_id_option_format.InterfaceIDOptionFormatTestCase
TestCase012 = server.relay_message_option_format.RelayMessageOptionFormatTestCase
TestCase013 = should(server.configuration_of_dns_options.ReturningDNSRecursiveNameServerOptionTestCase)
TestCase014 = server.configuration_of_dns_options.ReturningDNSServerandDomainSearchListOptionsTestCase
TestCase015 = should(server.creation_and_transmission_of_reply_messages.ReplyMessageTransmissionTestCase)
TestCase016 = server.creation_and_transmission_of_reply_messages.ReplyMessageTransmissionWithDNSRNSOptionTestCase
TestCase017 = server.creation_and_transmission_of_reply_messages.ReplyMessageTransmissionWithDomainSearchListOptionTestCase
TestCase018 = server.creation_and_transmission_of_reply_messages.RelayReplyMessageWithoutInterfaceIDTestCase
TestCase019 = server.creation_and_transmission_of_reply_messages.RelayReplyMessageWithInterfaceIDTestCase
TestCase020 = server.creation_and_transmission_of_relay_reply_messages.RelayReplyMessageTransmissionTestCase
TestCase021 = server.creation_and_transmission_of_relay_reply_messages.MultipleRelayReplyMessageTransmissionTestCase
TestCase022 = server.creation_and_transmission_of_relay_reply_messages.EncapsulatedRelayReplyMessageTransmissionTestCase
TestCase023 = server.reception_of_invalid_information_request_message.ReceptionOfInformationRequestMessageViaUnicastTestCase
TestCase024 = server.reception_of_invalid_information_request_message.ContainsServerIdentifierOptionTestCase
TestCase025 = server.reception_of_invalid_information_request_message.ContainsIANAOptionTestCase
TestCase026 = server.server_message_validation.AdvertiseMessageTestCase
TestCase027 = server.server_message_validation.ReplyMessageTestCase
TestCase028 = server.server_message_validation.RelayReplyMessageTestCase
ComplianceTestSuite.register('stateless-dhcpv6-client', StatelessDHCPv6ServiceClientSpecification)
#ComplianceTestSuite.register('dhcpv6-relay-agent', StatelessDHCPv6ServiceRelayAgentSpecification)
ComplianceTestSuite.register('stateless-dhcpv6-server', StatelessDHCPv6ServiceServerSpecification)
| gpl-3.0 |
tpokorra/pykolab | pykolab/cli/cmd_remove_mailaddress.py | 1 | 3257 | # -*- coding: utf-8 -*-
# Copyright 2010-2013 Kolab Systems AG (http://www.kolabsys.com)
#
# Jeroen van Meeuwen (Kolab Systems) <vanmeeuwen a kolabsys.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import commands
import pykolab
from pykolab.auth import Auth
from pykolab import utils
from pykolab.translate import _
log = pykolab.getLogger('pykolab.cli')
conf = pykolab.getConf()
def __init__():
commands.register('remove_mail', execute, description=description())
def description():
return """Remove a recipient's mail address."""
def execute(*args, **kw):
try:
email_address = conf.cli_args.pop(0)
except IndexError, errmsg:
email_address = utils.ask_question("Email address to remove")
# Get the domain from the email address
if len(email_address.split('@')) > 1:
domain = email_address.split('@')[1]
else:
log.error(_("Invalid or unqualified email address."))
sys.exit(1)
auth = Auth()
auth.connect(domain=domain)
recipients = auth.find_recipient(email_address)
if len(recipients) == 0:
log.error(_("No recipient found for email address %r") % (email_address))
sys.exit(1)
log.debug(_("Found the following recipient(s): %r") % (recipients), level=8)
mail_attributes = conf.get_list(domain, 'mail_attributes')
if mail_attributes == None or len(mail_attributes) < 1:
mail_attributes = conf.get_list(conf.get('kolab', 'auth_mechanism'), 'mail_attributes')
log.debug(_("Using the following mail attributes: %r") % (mail_attributes), level=8)
if isinstance(recipients, basestring):
recipient = recipients
# Only a single recipient found, remove the address
attributes = auth.get_entry_attributes(domain, recipient, mail_attributes)
# See which attribute holds the value we're trying to remove
for attribute in attributes.keys():
if isinstance(attributes[attribute], list):
if email_address in attributes[attribute]:
attributes[attribute].pop(attributes[attribute].index(email_address))
replace_attributes = {
attribute: attributes[attribute]
}
auth.set_entry_attributes(domain, recipient, replace_attributes)
else:
if email_address == attributes[attribute]:
auth.set_entry_attributes(domain, recipient, {attribute: None})
pass
else:
print >> sys.stderr, _("Found the following recipients:")
for recipient in recipients:
print recipient
| gpl-3.0 |
CoDEmanX/ArangoDB | 3rdParty/V8-4.3.61/build/gyp/test/win/gyptest-link-pgo.py | 239 | 2993 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure PGO is working properly.
"""
import TestGyp
import os
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('pgo.gyp', chdir=CHDIR)
def IsPGOAvailable():
"""Returns true if the Visual Studio available here supports PGO."""
test.build('pgo.gyp', 'gen_linker_option', chdir=CHDIR)
tmpfile = test.read(test.built_file_path('linker_options.txt', chdir=CHDIR))
return any(line.find('PGOPTIMIZE') for line in tmpfile)
# Test generated build files look fine.
if test.format == 'ninja':
ninja = test.built_file_path('obj/test_pgo_instrument.ninja', chdir=CHDIR)
test.must_contain(ninja, '/LTCG:PGINSTRUMENT')
test.must_contain(ninja, 'test_pgo.pgd')
ninja = test.built_file_path('obj/test_pgo_optimize.ninja', chdir=CHDIR)
test.must_contain(ninja, '/LTCG:PGOPTIMIZE')
test.must_contain(ninja, 'test_pgo.pgd')
ninja = test.built_file_path('obj/test_pgo_update.ninja', chdir=CHDIR)
test.must_contain(ninja, '/LTCG:PGUPDATE')
test.must_contain(ninja, 'test_pgo.pgd')
elif test.format == 'msvs':
LTCG_FORMAT = '<LinkTimeCodeGeneration>%s</LinkTimeCodeGeneration>'
vcproj = test.workpath('linker-flags/test_pgo_instrument.vcxproj')
test.must_contain(vcproj, LTCG_FORMAT % 'PGInstrument')
test.must_contain(vcproj, 'test_pgo.pgd')
vcproj = test.workpath('linker-flags/test_pgo_optimize.vcxproj')
test.must_contain(vcproj, LTCG_FORMAT % 'PGOptimization')
test.must_contain(vcproj, 'test_pgo.pgd')
vcproj = test.workpath('linker-flags/test_pgo_update.vcxproj')
test.must_contain(vcproj, LTCG_FORMAT % 'PGUpdate')
test.must_contain(vcproj, 'test_pgo.pgd')
# When PGO is available, try building binaries with PGO.
if IsPGOAvailable():
pgd_path = test.built_file_path('test_pgo.pgd', chdir=CHDIR)
# Test if 'PGInstrument' generates PGD (Profile-Guided Database) file.
if os.path.exists(pgd_path):
test.unlink(pgd_path)
test.must_not_exist(pgd_path)
test.build('pgo.gyp', 'test_pgo_instrument', chdir=CHDIR)
test.must_exist(pgd_path)
# Test if 'PGOptimize' works well
test.build('pgo.gyp', 'test_pgo_optimize', chdir=CHDIR)
test.must_contain_any_line(test.stdout(), ['profiled functions'])
# Test if 'PGUpdate' works well
test.build('pgo.gyp', 'test_pgo_update', chdir=CHDIR)
# With 'PGUpdate', linker should not complain that sources are changed after
# the previous training run.
test.touch(test.workpath('linker-flags/inline_test_main.cc'))
test.unlink(test.built_file_path('test_pgo_update.exe', chdir=CHDIR))
test.build('pgo.gyp', 'test_pgo_update', chdir=CHDIR)
test.must_contain_any_line(test.stdout(), ['profiled functions'])
test.pass_test()
| apache-2.0 |
toshywoshy/ansible | lib/ansible/plugins/lookup/k8s.py | 38 | 11371 | #
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: k8s
version_added: "2.5"
short_description: Query the K8s API
description:
- Uses the OpenShift Python client to fetch a specific object by name, all matching objects within a
namespace, or all matching objects for all namespaces, as well as information about the cluster.
- Provides access the full range of K8s APIs.
- Enables authentication via config file, certificates, password or token.
options:
cluster_info:
description:
- Use to specify the type of cluster information you are attempting to retrieve. Will take priority
over all the other options.
api_version:
description:
- Use to specify the API version. If I(resource definition) is provided, the I(apiVersion) from the
I(resource_definition) will override this option.
default: v1
kind:
description:
- Use to specify an object model. If I(resource definition) is provided, the I(kind) from a
I(resource_definition) will override this option.
required: true
resource_name:
description:
- Fetch a specific object by name. If I(resource definition) is provided, the I(metadata.name) value
from the I(resource_definition) will override this option.
namespace:
description:
- Limit the objects returned to a specific namespace. If I(resource definition) is provided, the
I(metadata.namespace) value from the I(resource_definition) will override this option.
label_selector:
description:
- Additional labels to include in the query. Ignored when I(resource_name) is provided.
field_selector:
description:
- Specific fields on which to query. Ignored when I(resource_name) is provided.
resource_definition:
description:
- "Provide a YAML configuration for an object. NOTE: I(kind), I(api_version), I(resource_name),
and I(namespace) will be overwritten by corresponding values found in the provided I(resource_definition)."
src:
description:
- "Provide a path to a file containing a valid YAML definition of an object dated. Mutually
exclusive with I(resource_definition). NOTE: I(kind), I(api_version), I(resource_name), and I(namespace)
will be overwritten by corresponding values found in the configuration read in from the I(src) file."
- Reads from the local file system. To read from the Ansible controller's file system, use the file lookup
plugin or template lookup plugin, combined with the from_yaml filter, and pass the result to
I(resource_definition). See Examples below.
host:
description:
- Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
api_key:
description:
- Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment variable.
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the openshift client will attempt to load the default
configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG environment
variable.
context:
description:
- The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
variable.
username:
description:
- Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME environment
variable.
password:
description:
- Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD environment
variable.
client_cert:
description:
- Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
environment
variable.
aliases: [ cert_file ]
client_key:
description:
- Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE environment
variable.
aliases: [ key_file ]
ca_cert:
description:
- Path to a CA certificate used to authenticate with the API. Can also be specified via K8S_AUTH_SSL_CA_CERT
environment variable.
aliases: [ ssl_ca_cert ]
validate_certs:
description:
- Whether or not to verify the API server's SSL certificates. Can also be specified via K8S_AUTH_VERIFY_SSL
environment variable.
type: bool
aliases: [ verify_ssl ]
requirements:
- "python >= 2.7"
- "openshift >= 0.6"
- "PyYAML >= 3.11"
notes:
- "The OpenShift Python client wraps the K8s Python client, providing full access to
all of the APIs and models available on both platforms. For API version details and
additional information visit https://github.com/openshift/openshift-restclient-python"
"""
EXAMPLES = """
- name: Fetch a list of namespaces
set_fact:
projects: "{{ lookup('k8s', api_version='v1', kind='Namespace') }}"
- name: Fetch all deployments
set_fact:
deployments: "{{ lookup('k8s', kind='Deployment') }}"
- name: Fetch all deployments in a namespace
set_fact:
deployments: "{{ lookup('k8s', kind='Deployment', namespace='testing') }}"
- name: Fetch a specific deployment by name
set_fact:
deployments: "{{ lookup('k8s', kind='Deployment', namespace='testing', resource_name='elastic') }}"
- name: Fetch with label selector
set_fact:
service: "{{ lookup('k8s', kind='Service', label_selector='app=galaxy') }}"
# Use parameters from a YAML config
- name: Load config from the Ansible controller filesystem
set_fact:
config: "{{ lookup('file', 'service.yml') | from_yaml }}"
- name: Using the config (loaded from a file in prior task), fetch the latest version of the object
set_fact:
service: "{{ lookup('k8s', resource_definition=config) }}"
- name: Use a config from the local filesystem
set_fact:
service: "{{ lookup('k8s', src='service.yml') }}"
"""
RETURN = """
_list:
description:
- One or more object definitions returned from the API.
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
"""
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.k8s.common import K8sAnsibleMixin
from ansible.errors import AnsibleError
try:
from openshift.dynamic import DynamicClient
from openshift.dynamic.exceptions import NotFoundError
HAS_K8S_MODULE_HELPER = True
k8s_import_exception = None
except ImportError as e:
HAS_K8S_MODULE_HELPER = False
k8s_import_exception = e
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
class KubernetesLookup(K8sAnsibleMixin):
def __init__(self):
if not HAS_K8S_MODULE_HELPER:
raise Exception(
"Requires the OpenShift Python client. Try `pip install openshift`. Detail: {0}".format(k8s_import_exception)
)
if not HAS_YAML:
raise Exception(
"Requires PyYAML. Try `pip install PyYAML`"
)
self.kind = None
self.name = None
self.namespace = None
self.api_version = None
self.label_selector = None
self.field_selector = None
self.include_uninitialized = None
self.resource_definition = None
self.helper = None
self.connection = {}
def fail(self, msg=None):
raise AnsibleError(msg)
def run(self, terms, variables=None, **kwargs):
self.params = kwargs
self.client = self.get_api_client()
cluster_info = kwargs.get('cluster_info')
if cluster_info == 'version':
return [self.client.version]
if cluster_info == 'api_groups':
return [self.client.resources.api_groups]
self.kind = kwargs.get('kind')
self.name = kwargs.get('resource_name')
self.namespace = kwargs.get('namespace')
self.api_version = kwargs.get('api_version', 'v1')
self.label_selector = kwargs.get('label_selector')
self.field_selector = kwargs.get('field_selector')
self.include_uninitialized = kwargs.get('include_uninitialized', False)
resource_definition = kwargs.get('resource_definition')
src = kwargs.get('src')
if src:
resource_definition = self.load_resource_definitions(src)[0]
if resource_definition:
self.kind = resource_definition.get('kind', self.kind)
self.api_version = resource_definition.get('apiVersion', self.api_version)
self.name = resource_definition.get('metadata', {}).get('name', self.name)
self.namespace = resource_definition.get('metadata', {}).get('namespace', self.namespace)
if not self.kind:
raise AnsibleError(
"Error: no Kind specified. Use the 'kind' parameter, or provide an object YAML configuration "
"using the 'resource_definition' parameter."
)
resource = self.find_resource(self.kind, self.api_version, fail=True)
try:
k8s_obj = resource.get(name=self.name, namespace=self.namespace, label_selector=self.label_selector, field_selector=self.field_selector)
except NotFoundError:
return []
if self.name:
return [k8s_obj.to_dict()]
return k8s_obj.to_dict().get('items')
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
return KubernetesLookup().run(terms, variables=variables, **kwargs)
| gpl-3.0 |
nealtodd/django | tests/admin_custom_urls/models.py | 288 | 2513 | from functools import update_wrapper
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.db import models
from django.http import HttpResponseRedirect
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Action(models.Model):
name = models.CharField(max_length=50, primary_key=True)
description = models.CharField(max_length=70)
def __str__(self):
return self.name
class ActionAdmin(admin.ModelAdmin):
"""
A ModelAdmin for the Action model that changes the URL of the add_view
to '<app name>/<model name>/!add/'
The Action model has a CharField PK.
"""
list_display = ('name', 'description')
def remove_url(self, name):
"""
Remove all entries named 'name' from the ModelAdmin instance URL
patterns list
"""
return [url for url in super(ActionAdmin, self).get_urls() if url.name != name]
def get_urls(self):
# Add the URL of our custom 'add_view' view to the front of the URLs
# list. Remove the existing one(s) first
from django.conf.urls import url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.model_name
view_name = '%s_%s_add' % info
return [
url(r'^!add/$', wrap(self.add_view), name=view_name),
] + self.remove_url(view_name)
class Person(models.Model):
name = models.CharField(max_length=20)
class PersonAdmin(admin.ModelAdmin):
def response_post_save_add(self, request, obj):
return HttpResponseRedirect(
reverse('admin:admin_custom_urls_person_history', args=[obj.pk]))
def response_post_save_change(self, request, obj):
return HttpResponseRedirect(
reverse('admin:admin_custom_urls_person_delete', args=[obj.pk]))
class Car(models.Model):
name = models.CharField(max_length=20)
class CarAdmin(admin.ModelAdmin):
def response_add(self, request, obj, post_url_continue=None):
return super(CarAdmin, self).response_add(
request, obj, post_url_continue=reverse('admin:admin_custom_urls_car_history', args=[obj.pk]))
site = admin.AdminSite(name='admin_custom_urls')
site.register(Action, ActionAdmin)
site.register(Person, PersonAdmin)
site.register(Car, CarAdmin)
| bsd-3-clause |
jpwhite3/wilmu-linux-toolkit | lab_toolkit/includes/linecache2/tests/test_linecache.py | 17 | 6508 | """ Tests for the linecache module """
import linecache2 as linecache
import unittest2 as unittest
import os.path
import tempfile
from fixtures import NestedTempfile
FILENAME = os.__file__
if FILENAME.endswith('.pyc'):
FILENAME = FILENAME[:-1]
NONEXISTENT_FILENAME = FILENAME + '.missing'
INVALID_NAME = '!@$)(!@#_1'
EMPTY = ''
TESTS = 'inspect_fodder inspect_fodder2 mapping_tests'
TESTS = TESTS.split()
TEST_PATH = os.path.dirname(__file__)
MODULES = "linecache abc".split()
MODULE_PATH = os.path.dirname(FILENAME)
SOURCE_1 = '''
" Docstring "
def function():
return result
'''
SOURCE_2 = '''
def f():
return 1 + 1
a = f()
'''
SOURCE_3 = '''
def f():
return 3''' # No ending newline
class LineCacheTests(unittest.TestCase):
def setUp(self):
tempdir = NestedTempfile()
tempdir.setUp()
self.addCleanup(tempdir.cleanUp)
def test_getline(self):
getline = linecache.getline
# Bad values for line number should return an empty string
self.assertEqual(getline(FILENAME, 2**15), EMPTY)
self.assertEqual(getline(FILENAME, -1), EMPTY)
# Float values currently raise TypeError, should they?
self.assertRaises(TypeError, getline, FILENAME, 1.1)
# Bad filenames should return an empty string
self.assertEqual(getline(EMPTY, 1), EMPTY)
self.assertEqual(getline(INVALID_NAME, 1), EMPTY)
# Check whether lines correspond to those from file iteration
for entry in TESTS:
filename = os.path.join(TEST_PATH, entry) + '.py'
with open(filename) as file:
for index, line in enumerate(file):
self.assertEqual(line, getline(filename, index + 1))
# Check module loading
for entry in MODULES:
filename = os.path.join(MODULE_PATH, entry) + '.py'
with open(filename) as file:
for index, line in enumerate(file):
self.assertEqual(line, getline(filename, index + 1))
# Check that bogus data isn't returned (issue #1309567)
empty = linecache.getlines('a/b/c/__init__.py')
self.assertEqual(empty, [])
def test_no_ending_newline(self):
temp_file = tempfile.NamedTemporaryFile(
suffix='.py', mode='w', delete=False)
self.addCleanup(os.unlink, temp_file.name)
with open(temp_file.name, "w") as fp:
fp.write(SOURCE_3)
lines = linecache.getlines(temp_file.name)
self.assertEqual(lines, ["\n", "def f():\n", " return 3\n"])
def test_clearcache(self):
cached = []
for entry in TESTS:
filename = os.path.join(TEST_PATH, entry) + '.py'
cached.append(filename)
linecache.getline(filename, 1)
# Are all files cached?
cached_empty = [fn for fn in cached if fn not in linecache.cache]
self.assertEqual(cached_empty, [])
# Can we clear the cache?
linecache.clearcache()
cached_empty = [fn for fn in cached if fn in linecache.cache]
self.assertEqual(cached_empty, [])
def test_checkcache(self):
getline = linecache.getline
# Create a source file and cache its contents
temp_file = tempfile.NamedTemporaryFile(
suffix='.py', mode='w', delete=False)
source_name = temp_file.name
self.addCleanup(os.unlink, source_name)
with open(source_name, 'w') as source:
source.write(SOURCE_1)
getline(source_name, 1)
# Keep a copy of the old contents
source_list = []
with open(source_name) as source:
for index, line in enumerate(source):
self.assertEqual(line, getline(source_name, index + 1))
source_list.append(line)
with open(source_name, 'w') as source:
source.write(SOURCE_2)
# Try to update a bogus cache entry
linecache.checkcache('dummy')
# Check that the cache matches the old contents
for index, line in enumerate(source_list):
self.assertEqual(line, getline(source_name, index + 1))
# Update the cache and check whether it matches the new source file
linecache.checkcache(source_name)
with open(source_name) as source:
for index, line in enumerate(source):
self.assertEqual(line, getline(source_name, index + 1))
source_list.append(line)
def test_lazycache_no_globals(self):
lines = linecache.getlines(FILENAME)
linecache.clearcache()
self.assertEqual(False, linecache.lazycache(FILENAME, None))
self.assertEqual(lines, linecache.getlines(FILENAME))
@unittest.skipIf("__loader__" not in globals(), "Modules not PEP302 by default")
def test_lazycache_smoke(self):
lines = linecache.getlines(NONEXISTENT_FILENAME, globals())
linecache.clearcache()
self.assertEqual(
True, linecache.lazycache(NONEXISTENT_FILENAME, globals()))
self.assertEqual(1, len(linecache.cache[NONEXISTENT_FILENAME]))
# Note here that we're looking up a non-existent filename with no
# globals: this would error if the lazy value wasn't resolved.
self.assertEqual(lines, linecache.getlines(NONEXISTENT_FILENAME))
def test_lazycache_provide_after_failed_lookup(self):
linecache.clearcache()
lines = linecache.getlines(NONEXISTENT_FILENAME, globals())
linecache.clearcache()
linecache.getlines(NONEXISTENT_FILENAME)
linecache.lazycache(NONEXISTENT_FILENAME, globals())
self.assertEqual(lines, linecache.updatecache(NONEXISTENT_FILENAME))
def test_lazycache_check(self):
linecache.clearcache()
linecache.lazycache(NONEXISTENT_FILENAME, globals())
linecache.checkcache()
def test_lazycache_bad_filename(self):
linecache.clearcache()
self.assertEqual(False, linecache.lazycache('', globals()))
self.assertEqual(False, linecache.lazycache('<foo>', globals()))
@unittest.skipIf("__loader__" not in globals(), "Modules not PEP302 by default")
def test_lazycache_already_cached(self):
linecache.clearcache()
lines = linecache.getlines(NONEXISTENT_FILENAME, globals())
self.assertEqual(
False,
linecache.lazycache(NONEXISTENT_FILENAME, globals()))
self.assertEqual(4, len(linecache.cache[NONEXISTENT_FILENAME]))
| mit |
daviddupont69/CouchPotatoServer | libs/sqlalchemy/sql/__init__.py | 18 | 1138 | # sql/__init__.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy.sql.expression import (
Alias,
ClauseElement,
ColumnCollection,
ColumnElement,
CompoundSelect,
Delete,
FromClause,
Insert,
Join,
Select,
Selectable,
TableClause,
Update,
alias,
and_,
asc,
between,
bindparam,
case,
cast,
collate,
column,
delete,
desc,
distinct,
except_,
except_all,
exists,
extract,
false,
func,
insert,
intersect,
intersect_all,
join,
label,
literal,
literal_column,
modifier,
not_,
null,
or_,
outerjoin,
outparam,
over,
select,
subquery,
table,
text,
true,
tuple_,
type_coerce,
union,
union_all,
update,
)
from sqlalchemy.sql.visitors import ClauseVisitor
__tmp = locals().keys()
__all__ = sorted([i for i in __tmp if not i.startswith('__')])
| gpl-3.0 |
robynbergeron/ansible-modules-extras | network/f5/bigip_node.py | 77 | 13267 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Matt Hite <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_node
short_description: "Manages F5 BIG-IP LTM nodes"
description:
- "Manages F5 BIG-IP LTM nodes via iControl SOAP API"
version_added: "1.4"
author: "Matt Hite (@mhite)"
notes:
- "Requires BIG-IP software version >= 11"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
requirements:
- bigsuds
options:
server:
description:
- BIG-IP host
required: true
default: null
choices: []
aliases: []
user:
description:
- BIG-IP username
required: true
default: null
choices: []
aliases: []
password:
description:
- BIG-IP password
required: true
default: null
choices: []
aliases: []
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 2.0
state:
description:
- Pool member state
required: true
default: present
choices: ['present', 'absent']
aliases: []
session_state:
description:
- Set new session availability status for node
version_added: "1.9"
required: false
default: null
choices: ['enabled', 'disabled']
aliases: []
monitor_state:
description:
- Set monitor availability status for node
version_added: "1.9"
required: false
default: null
choices: ['enabled', 'disabled']
aliases: []
partition:
description:
- Partition
required: false
default: 'Common'
choices: []
aliases: []
name:
description:
- "Node name"
required: false
default: null
choices: []
host:
description:
- "Node IP. Required when state=present and node does not exist. Error when state=absent."
required: true
default: null
choices: []
aliases: ['address', 'ip']
description:
description:
- "Node description."
required: false
default: null
choices: []
'''
EXAMPLES = '''
## playbook task examples:
---
# file bigip-test.yml
# ...
- hosts: bigip-test
tasks:
- name: Add node
local_action: >
bigip_node
server=lb.mydomain.com
user=admin
password=mysecret
state=present
partition=matthite
host="{{ ansible_default_ipv4["address"] }}"
name="{{ ansible_default_ipv4["address"] }}"
# Note that the BIG-IP automatically names the node using the
# IP address specified in previous play's host parameter.
# Future plays referencing this node no longer use the host
# parameter but instead use the name parameter.
# Alternatively, you could have specified a name with the
# name parameter when state=present.
- name: Modify node description
local_action: >
bigip_node
server=lb.mydomain.com
user=admin
password=mysecret
state=present
partition=matthite
name="{{ ansible_default_ipv4["address"] }}"
description="Our best server yet"
- name: Delete node
local_action: >
bigip_node
server=lb.mydomain.com
user=admin
password=mysecret
state=absent
partition=matthite
name="{{ ansible_default_ipv4["address"] }}"
# The BIG-IP GUI doesn't map directly to the API calls for "Node ->
# General Properties -> State". The following states map to API monitor
# and session states.
#
# Enabled (all traffic allowed):
# monitor_state=enabled, session_state=enabled
# Disabled (only persistent or active connections allowed):
# monitor_state=enabled, session_state=disabled
# Forced offline (only active connections allowed):
# monitor_state=disabled, session_state=disabled
#
# See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down
- name: Force node offline
local_action: >
bigip_node
server=lb.mydomain.com
user=admin
password=mysecret
state=present
session_state=disabled
monitor_state=disabled
partition=matthite
name="{{ ansible_default_ipv4["address"] }}"
'''
def node_exists(api, address):
# hack to determine if node exists
result = False
try:
api.LocalLB.NodeAddressV2.get_object_status(nodes=[address])
result = True
except bigsuds.OperationFailed, e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def create_node_address(api, address, name):
try:
api.LocalLB.NodeAddressV2.create(nodes=[name], addresses=[address], limits=[0])
result = True
desc = ""
except bigsuds.OperationFailed, e:
if "already exists" in str(e):
result = False
desc = "referenced name or IP already in use"
else:
# genuine exception
raise
return (result, desc)
def get_node_address(api, name):
return api.LocalLB.NodeAddressV2.get_address(nodes=[name])[0]
def delete_node_address(api, address):
try:
api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
result = True
desc = ""
except bigsuds.OperationFailed, e:
if "is referenced by a member of pool" in str(e):
result = False
desc = "node referenced by pool"
else:
# genuine exception
raise
return (result, desc)
def set_node_description(api, name, description):
api.LocalLB.NodeAddressV2.set_description(nodes=[name],
descriptions=[description])
def get_node_description(api, name):
return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0]
def set_node_session_enabled_state(api, name, session_state):
session_state = "STATE_%s" % session_state.strip().upper()
api.LocalLB.NodeAddressV2.set_session_enabled_state(nodes=[name],
states=[session_state])
def get_node_session_status(api, name):
result = api.LocalLB.NodeAddressV2.get_session_status(nodes=[name])[0]
result = result.split("SESSION_STATUS_")[-1].lower()
return result
def set_node_monitor_state(api, name, monitor_state):
monitor_state = "STATE_%s" % monitor_state.strip().upper()
api.LocalLB.NodeAddressV2.set_monitor_state(nodes=[name],
states=[monitor_state])
def get_node_monitor_status(api, name):
result = api.LocalLB.NodeAddressV2.get_monitor_status(nodes=[name])[0]
result = result.split("MONITOR_STATUS_")[-1].lower()
return result
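# For reference: the iControl API reports strings such as
# 'MONITOR_STATUS_FORCED_DOWN' or 'SESSION_STATUS_FORCED_DISABLED'; the helpers
# above strip the prefix and lower-case them, e.g. to 'forced_down'.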
def main():
argument_spec = f5_argument_spec()
argument_spec.update(dict(
session_state = dict(type='str', choices=['enabled', 'disabled']),
monitor_state = dict(type='str', choices=['enabled', 'disabled']),
name = dict(type='str', required=True),
host = dict(type='str', aliases=['address', 'ip']),
description = dict(type='str')
)
)
module = AnsibleModule(
argument_spec = argument_spec,
supports_check_mode=True
)
(server,user,password,state,partition,validate_certs) = f5_parse_arguments(module)
session_state = module.params['session_state']
monitor_state = module.params['monitor_state']
host = module.params['host']
name = module.params['name']
address = fq_name(partition, name)
description = module.params['description']
if state == 'absent' and host is not None:
module.fail_json(msg="host parameter invalid when state=absent")
try:
api = bigip_api(server, user, password)
result = {'changed': False} # default
if state == 'absent':
if node_exists(api, address):
if not module.check_mode:
deleted, desc = delete_node_address(api, address)
if not deleted:
module.fail_json(msg="unable to delete: %s" % desc)
else:
result = {'changed': True}
else:
# check-mode return value
result = {'changed': True}
elif state == 'present':
if not node_exists(api, address):
if host is None:
module.fail_json(msg="host parameter required when " \
"state=present and node does not exist")
if not module.check_mode:
created, desc = create_node_address(api, address=host, name=address)
if not created:
module.fail_json(msg="unable to create: %s" % desc)
else:
result = {'changed': True}
if session_state is not None:
set_node_session_enabled_state(api, address,
session_state)
result = {'changed': True}
if monitor_state is not None:
set_node_monitor_state(api, address, monitor_state)
result = {'changed': True}
if description is not None:
set_node_description(api, address, description)
result = {'changed': True}
else:
# check-mode return value
result = {'changed': True}
else:
# node exists -- potentially modify attributes
if host is not None:
if get_node_address(api, address) != host:
module.fail_json(msg="Changing the node address is " \
"not supported by the API; " \
"delete and recreate the node.")
if session_state is not None:
session_status = get_node_session_status(api, address)
if session_state == 'enabled' and \
session_status == 'forced_disabled':
if not module.check_mode:
set_node_session_enabled_state(api, address,
session_state)
result = {'changed': True}
elif session_state == 'disabled' and \
session_status != 'forced_disabled':
if not module.check_mode:
set_node_session_enabled_state(api, address,
session_state)
result = {'changed': True}
if monitor_state is not None:
monitor_status = get_node_monitor_status(api, address)
if monitor_state == 'enabled' and \
monitor_status == 'forced_down':
if not module.check_mode:
set_node_monitor_state(api, address,
monitor_state)
result = {'changed': True}
elif monitor_state == 'disabled' and \
monitor_status != 'forced_down':
if not module.check_mode:
set_node_monitor_state(api, address,
monitor_state)
result = {'changed': True}
if description is not None:
if get_node_description(api, address) != description:
if not module.check_mode:
set_node_description(api, address, description)
result = {'changed': True}
except Exception, e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
main()
| gpl-3.0 |
lonvia/osgende | tools/osgende-mapserv-falcon.py | 1 | 9619 | # This file is part of Osgende
# Copyright (C) 2020 Sarah Hoffmann
#
# This is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
Falcon-based tile server for tile databases generated with osgende-mapgen.
Use with uWSGI.
"""
import datetime
import os
import sys
import threading
import hashlib
from math import pi,exp,atan
import falcon
import mapnik
RAD_TO_DEG = 180/pi
class TileProjection:
def __init__(self,levels=18):
self.Bc = []
self.Cc = []
self.zc = []
self.Ac = []
c = 256
for d in range(0,levels + 1):
e = c/2
self.Bc.append(c/360.0)
self.Cc.append(c/(2 * pi))
self.zc.append((e,e))
self.Ac.append(c)
c *= 2
def fromTileToLL(self, zoom, x, y):
e = self.zc[zoom]
f = (x*256.0 - e[0])/self.Bc[zoom]
g = (y*256.0 - e[1])/-self.Cc[zoom]
h = RAD_TO_DEG * ( 2 * atan(exp(g)) - 0.5 * pi)
return (f,h)
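# Worked example (a sketch, assuming the standard 256px Web Mercator tiling
# built above): at zoom 0 the whole world is a single tile, so
#   fromTileToLL(0, 0, 0) -> (-180.0, ~85.05)    # north-west corner
#   fromTileToLL(0, 1, 1) -> (180.0, ~-85.05)    # south-east corner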
def mk_tileid(zoom, x, y):
"""Create a unique 64 bit tile ID.
Works up to zoom level 24.
"""
return zoom + (x << 5) + (y << (5 + zoom))
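# Worked example (sketch): mk_tileid(3, 5, 2) == 3 + (5 << 5) + (2 << (5 + 3))
# == 3 + 160 + 512 == 675; per the docstring this stays unique up to zoom 24.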
class DummyCache(object):
""" A tile cache that does not remember any tiles.
Useful when testing out a new style.
"""
def __init__(self, config):
pass
def get(self, zoom, x, y, fmt):
return None
def set(self, zoom, x, y, fmt, image=None):
pass
class PostgresCache(object):
""" A cache that saves tiles in postgres.
"""
def __init__(self, config):
self.empty = dict()
for fmt, fname in config['empty_tile'].items():
with open(fname, 'rb') as myfile:
self.empty[fmt] = myfile.read()
self.max_zoom = config.get('max_zoom', 100)
self.pg = __import__('psycopg2')
self.dba = config['dba']
self.cmd_get = "SELECT pixbuf FROM %s WHERE id=%%s" % config['table']
self.cmd_check = "SELECT count(*) FROM %s WHERE id=%%s" % config['table']
self.cmd_set = "UPDATE %s SET pixbuf=%%s WHERE id=%%s AND pixbuf is Null" % config['table']
self.thread_data = threading.local()
def get_db(self):
if not hasattr(self.thread_data, 'cache_db'):
self.thread_data.cache_db = self.pg.connect(self.dba)
# set into autocommit mode so that tiles still can be
# read while the db is updated
self.thread_data.cache_db.autocommit = True
self.thread_data.cache_db.cursor().execute("SET synchronous_commit TO OFF")
return self.thread_data.cache_db
def get(self, zoom, x, y, fmt):
c = self.get_db().cursor()
if zoom > self.max_zoom:
shift = zoom - self.max_zoom
c.execute(self.cmd_check,
(mk_tileid(self.max_zoom, x >> shift, y >> shift), ))
if c.fetchone()[0]:
return None
else:
c.execute(self.cmd_get, (mk_tileid(zoom, x, y), ))
if c.rowcount > 0:
tile = c.fetchone()[0]
return bytes(tile) if tile is not None else None
return self.empty[fmt]
def set(self, zoom, x, y, fmt, image=None):
if zoom <= self.max_zoom:
c = self.get_db().cursor()
c.execute(self.cmd_set, (image, mk_tileid(zoom, x, y)))
class MapnikRenderer(object):
def __init__(self, name, config, styleconfig):
self.name = name
# defaults
self.config = dict({ 'formats' : [ 'png' ],
'tile_size' : (256, 256),
'max_zoom' : 18
})
self.stylecfg = dict()
# local configuration
if config is not None:
self.config.update(config)
if styleconfig is not None:
self.stylecfg.update(styleconfig)
if self.config['source_type'] == 'xml':
self.create_map = self._create_map_xml
if self.config['source_type'] == 'python':
self.python_map =__import__(self.config['source'])
self.create_map = self._create_map_python
m = mapnik.Map(*self.config['tile_size'])
self.create_map(m)
self.mproj = mapnik.Projection(m.srs)
self.gproj = TileProjection(self.config['max_zoom'])
self.thread_data = threading.local()
def get_map(self):
self.thread_map()
return self.thread_data.map
def thread_map(self):
if not hasattr(self.thread_data, 'map'):
m = mapnik.Map(*self.config['tile_size'])
self.create_map(m)
self.thread_data.map = m
def _create_map_xml(self, mapnik_map):
src = os.path.join(self.config['source'])
mapnik.load_map(mapnik_map, src)
def _create_map_python(self, mapnik_map):
self.python_map.construct_map(mapnik_map, self.stylecfg)
def split_url(self, zoom, x, y):
ypt = y.find('.')
if ypt < 0:
return None
tiletype = y[ypt+1:]
if tiletype not in self.config['formats']:
return None
try:
zoom = int(zoom)
x = int(x)
y = int(y[:ypt])
except ValueError:
return None
if zoom > self.config['max_zoom']:
return None
return (zoom, x, y, tiletype)
def render(self, zoom, x, y, fmt):
p0 = self.gproj.fromTileToLL(zoom, x, y+1)
p1 = self.gproj.fromTileToLL(zoom, x+1, y)
c0 = self.mproj.forward(mapnik.Coord(p0[0],p0[1]))
c1 = self.mproj.forward(mapnik.Coord(p1[0],p1[1]))
bbox = mapnik.Box2d(c0.x, c0.y, c1.x, c1.y)
im = mapnik.Image(256, 256)
m = self.get_map()
m.zoom_to_box(bbox)
mapnik.render(m, im)
return im.tostring('png256')
class TestMap(object):
DEFAULT_TESTMAP="""\
<!DOCTYPE html>
<html>
<head>
<title>Testmap - %(style)s</title>
<link rel="stylesheet" href="%(leaflet_path)s/leaflet.css" />
</head>
<body >
<div id="map" style="position: absolute; width: 99%%; height: 97%%"></div>
<script src="%(leaflet_path)s/leaflet.js"></script>
<script src="%(leaflet_path)s/leaflet-hash.js"></script>
<script>
var map = L.map('map').setView([47.3317, 8.5017], 13);
var hash = new L.Hash(map);
L.tileLayer('http://a.tile.openstreetmap.org/{z}/{x}/{y}.png', {
maxZoom: 18,
}).addTo(map);
L.tileLayer('%(script_name)s/%(style)s/{z}/{x}/{y}.png', {
maxZoom: 18,
}).addTo(map);
</script>
</body>
</html>
"""
def __init__(self, style, script):
self.map_config = {
'style' : style,
'script_name' : script,
'leaflet_path' : os.environ.get('LEAFLET_PATH',
'http://cdn.leafletjs.com/leaflet-0.7.5')
}
def on_get(self, req, resp):
resp.content_type = falcon.MEDIA_HTML
resp.body = self.DEFAULT_TESTMAP % self.map_config
class TileServer(object):
def __init__(self, style, config):
self.cachecfg = dict({ 'type' : 'DummyCache'})
if 'TILE_CACHE' in config:
self.cachecfg.update(config['TILE_CACHE'])
cacheclass = globals()[self.cachecfg['type']]
self.cache = cacheclass(self.cachecfg)
self.renderer = MapnikRenderer(style,
config.get('RENDERER'),
config.get('TILE_STYLE'))
def on_get(self, req, resp, zoom, x, y):
tile_desc = self.renderer.split_url(zoom, x, y)
if tile_desc is None:
raise falcon.HTTPNotFound()
tile = self.cache.get(*tile_desc)
if tile is None:
tile = self.renderer.render(*tile_desc)
self.cache.set(*tile_desc, image=tile)
# compute etag
m = hashlib.md5()
m.update(tile)
content_etag = m.hexdigest()
for etag in (req.if_none_match or []):
if etag == '*' or etag == content_etag:
resp.status = falcon.HTTP_304
return
resp.content_type = falcon.MEDIA_PNG
resp.expires = datetime.datetime.utcnow() + datetime.timedelta(hours=3)
resp.body = tile
resp.etag = content_etag
def setup_site(app, site, script_name=''):
try:
__import__(site)
except ImportError:
print("Missing config for site '%s'. Skipping." % site)
return
site_cfg = dict()
for var in dir(sys.modules[site]):
site_cfg[var] = getattr(sys.modules[site], var)
basename = site.split('.')[-1]
print("Setting up site", basename)
app.add_route('/' + basename + '/test-map', TestMap(basename, script_name))
app.add_route('/' + basename + '/{zoom}/{x}/{y}', TileServer(basename, site_cfg))
application = falcon.API()
for site in os.environ['TILE_SITES'].split(','):
setup_site(application, site)
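# Illustrative deployment (hypothetical site name; flags per standard uWSGI):
#   TILE_SITES=hikingmap uwsgi --http :8080 --wsgi-file osgende-mapserv-falcon.py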
| gpl-3.0 |
Softmotions/edx-platform | common/test/acceptance/tests/studio/test_studio_settings_details.py | 19 | 7658 | """
Acceptance tests for Studio's Settings Details pages
"""
from unittest import skip
from .base_studio_test import StudioCourseTest
from ...fixtures.course import CourseFixture
from ...pages.studio.settings import SettingsPage
from ...pages.studio.overview import CourseOutlinePage
from ...tests.studio.base_studio_test import StudioCourseTest
from ..helpers import (
generate_course_key,
select_option_by_value,
is_option_value_selected,
element_has_text,
)
class SettingsMilestonesTest(StudioCourseTest):
"""
Tests for milestones feature in Studio's settings tab
"""
def setUp(self, is_staff=True):
super(SettingsMilestonesTest, self).setUp(is_staff=is_staff)
self.settings_detail = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Before every test, make sure to visit the page first
self.settings_detail.visit()
self.assertTrue(self.settings_detail.is_browser_on_page())
def test_page_has_prerequisite_field(self):
"""
Test to make sure page has pre-requisite course field if milestones app is enabled.
"""
self.assertTrue(self.settings_detail.pre_requisite_course_options)
def test_prerequisite_course_save_successfully(self):
"""
Scenario: Selecting course from Pre-Requisite course drop down save the selected course as pre-requisite
course.
Given that I am on the Schedule & Details page on studio
When I select an item in pre-requisite course drop down and click Save Changes button
Then My selected item should be saved as pre-requisite course
And My selected item should be selected after refreshing the page.'
"""
course_number = self.unique_id
CourseFixture(
org='test_org',
number=course_number,
run='test_run',
display_name='Test Course' + course_number
).install()
pre_requisite_course_key = generate_course_key(
org='test_org',
number=course_number,
run='test_run'
)
pre_requisite_course_id = unicode(pre_requisite_course_key)
# Refresh the page to load the new course fixture and populate the prerequisite course dropdown
# Then select the prerequisite course and save the changes
self.settings_detail.refresh_page()
self.settings_detail.wait_for_prerequisite_course_options()
select_option_by_value(
browser_query=self.settings_detail.pre_requisite_course_options,
value=pre_requisite_course_id
)
self.settings_detail.save_changes()
self.assertEqual(
'Your changes have been saved.',
self.settings_detail.alert_confirmation_title.text
)
# Refresh the page again and confirm the prerequisite course selection is properly reflected
self.settings_detail.refresh_page()
self.settings_detail.wait_for_prerequisite_course_options()
self.assertTrue(is_option_value_selected(
browser_query=self.settings_detail.pre_requisite_course_options,
value=pre_requisite_course_id
))
# Set the prerequisite course back to None and save the changes
select_option_by_value(
browser_query=self.settings_detail.pre_requisite_course_options,
value=''
)
self.settings_detail.save_changes()
self.assertEqual(
'Your changes have been saved.',
self.settings_detail.alert_confirmation_title.text
)
# Refresh the page again to confirm the None selection is properly reflected
self.settings_detail.refresh_page()
self.settings_detail.wait_for_prerequisite_course_options()
self.assertTrue(is_option_value_selected(
browser_query=self.settings_detail.pre_requisite_course_options,
value=''
))
# Re-pick the prerequisite course and confirm no errors are thrown (covers a discovered bug)
select_option_by_value(
browser_query=self.settings_detail.pre_requisite_course_options,
value=pre_requisite_course_id
)
self.settings_detail.save_changes()
self.assertEqual(
'Your changes have been saved.',
self.settings_detail.alert_confirmation_title.text
)
# Refresh the page again to confirm the prerequisite course selection is properly reflected
self.settings_detail.refresh_page()
self.settings_detail.wait_for_prerequisite_course_options()
dropdown_status = is_option_value_selected(
browser_query=self.settings_detail.pre_requisite_course_options,
value=pre_requisite_course_id
)
self.assertTrue(dropdown_status)
def test_page_has_enable_entrance_exam_field(self):
"""
Test to make sure page has 'enable entrance exam' field.
"""
self.assertTrue(self.settings_detail.entrance_exam_field)
@skip('Passes in devstack, passes individually in Jenkins, fails in suite in Jenkins.')
def test_enable_entrance_exam_for_course(self):
"""
Test that entrance exam should be created after checking the 'enable entrance exam' checkbox.
And also that the entrance exam is destroyed after deselecting the checkbox.
"""
self.settings_detail.require_entrance_exam(required=True)
self.settings_detail.save_changes()
# getting the course outline page.
course_outline_page = CourseOutlinePage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
course_outline_page.visit()
# title with text 'Entrance Exam' should be present on page.
self.assertTrue(element_has_text(
page=course_outline_page,
css_selector='span.section-title',
text='Entrance Exam'
))
# Delete the currently created entrance exam.
self.settings_detail.visit()
self.settings_detail.require_entrance_exam(required=False)
self.settings_detail.save_changes()
course_outline_page.visit()
self.assertFalse(element_has_text(
page=course_outline_page,
css_selector='span.section-title',
text='Entrance Exam'
))
def test_entrance_exam_has_unit_button(self):
"""
Test that entrance exam should be created after checking the 'enable entrance exam' checkbox.
And user has option to add units only instead of any Subsection.
"""
self.settings_detail.require_entrance_exam(required=True)
self.settings_detail.save_changes()
# getting the course outline page.
course_outline_page = CourseOutlinePage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
course_outline_page.visit()
course_outline_page.wait_for_ajax()
# button with text 'New Unit' should be present.
self.assertTrue(element_has_text(
page=course_outline_page,
css_selector='.add-item a.button-new',
text='New Unit'
))
# button with text 'New Subsection' should not be present.
self.assertFalse(element_has_text(
page=course_outline_page,
css_selector='.add-item a.button-new',
text='New Subsection'
))
| agpl-3.0 |
dhuang/incubator-airflow | airflow/www/api/experimental/endpoints.py | 2 | 8145 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import airflow.api
from airflow.api.common.experimental import pool as pool_api
from airflow.api.common.experimental import trigger_dag as trigger
from airflow.api.common.experimental import delete_dag as delete
from airflow.api.common.experimental.get_task import get_task
from airflow.api.common.experimental.get_task_instance import get_task_instance
from airflow.exceptions import AirflowException
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils import timezone
from airflow.www.app import csrf
from flask import (
g, Markup, Blueprint, redirect, jsonify, abort,
request, current_app, send_file, url_for
)
_log = LoggingMixin().log
requires_authentication = airflow.api.api_auth.requires_authentication
api_experimental = Blueprint('api_experimental', __name__)
@csrf.exempt
@api_experimental.route('/dags/<string:dag_id>/dag_runs', methods=['POST'])
@requires_authentication
def trigger_dag(dag_id):
"""
Trigger a new dag run for a Dag with an execution date of now unless
specified in the data.
"""
data = request.get_json(force=True)
run_id = None
if 'run_id' in data:
run_id = data['run_id']
conf = None
if 'conf' in data:
conf = data['conf']
execution_date = None
if 'execution_date' in data and data['execution_date'] is not None:
execution_date = data['execution_date']
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'
.format(execution_date))
_log.info(error_message)
response = jsonify({'error': error_message})
response.status_code = 400
return response
try:
dr = trigger.trigger_dag(dag_id, run_id, conf, execution_date)
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = 404
return response
if getattr(g, 'user', None):
_log.info("User {} created {}".format(g.user, dr))
response = jsonify(message="Created {}".format(dr))
return response
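# Illustrative request for the endpoint above (hypothetical DAG id and values;
# all three body keys are optional):
#   POST /dags/example_dag/dag_runs
#   {"run_id": "manual_run_1", "conf": {"key": "value"},
#    "execution_date": "2015-11-16T14:34:15+00:00"}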
@csrf.exempt
@api_experimental.route('/dags/<string:dag_id>', methods=['DELETE'])
@requires_authentication
def delete_dag(dag_id):
"""
Delete all DB records related to the specified Dag.
"""
try:
count = delete.delete_dag(dag_id)
except AirflowException as e:
_log.error(e)
response = jsonify(error="{}".format(e))
response.status_code = getattr(e, 'status', 500)
return response
return jsonify(message="Removed {} record(s)".format(count), count=count)
@api_experimental.route('/test', methods=['GET'])
@requires_authentication
def test():
return jsonify(status='OK')
@api_experimental.route('/dags/<string:dag_id>/tasks/<string:task_id>', methods=['GET'])
@requires_authentication
def task_info(dag_id, task_id):
"""Returns a JSON with a task's public instance variables. """
try:
info = get_task(dag_id, task_id)
except AirflowException as err:
_log.info(err)
response = jsonify(error="{}".format(err))
response.status_code = 404
return response
# JSONify and return.
fields = {k: str(v)
for k, v in vars(info).items()
if not k.startswith('_')}
return jsonify(fields)
@api_experimental.route('/dags/<string:dag_id>/dag_runs/<string:execution_date>/tasks/<string:task_id>', methods=['GET'])
@requires_authentication
def task_instance_info(dag_id, execution_date, task_id):
"""
Returns a JSON with a task instance's public instance variables.
The format for the exec_date is expected to be
"YYYY-mm-DDTHH:MM:SS", for example: "2016-11-16T11:34:15". This will
of course need to have been encoded for URL in the request.
"""
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'
.format(execution_date))
_log.info(error_message)
response = jsonify({'error': error_message})
response.status_code = 400
return response
try:
info = get_task_instance(dag_id, task_id, execution_date)
except AirflowException as err:
_log.info(err)
response = jsonify(error="{}".format(err))
response.status_code = 404
return response
# JSONify and return.
fields = {k: str(v)
for k, v in vars(info).items()
if not k.startswith('_')}
return jsonify(fields)
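# Illustrative request for the endpoint above (hypothetical DAG and task ids);
# the execution date is placed in the path URL-encoded, e.g.
#   GET /dags/example_dag/dag_runs/2016-11-16T11%3A34%3A15/tasks/example_task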
@api_experimental.route('/latest_runs', methods=['GET'])
@requires_authentication
def latest_dag_runs():
"""Returns the latest DagRun for each DAG formatted for the UI. """
from airflow.models import DagRun
dagruns = DagRun.get_latest_runs()
payload = []
for dagrun in dagruns:
if dagrun.execution_date:
payload.append({
'dag_id': dagrun.dag_id,
'execution_date': dagrun.execution_date.isoformat(),
'start_date': ((dagrun.start_date or '') and
dagrun.start_date.isoformat()),
'dag_run_url': url_for('airflow.graph', dag_id=dagrun.dag_id,
execution_date=dagrun.execution_date)
})
return jsonify(items=payload) # old flask versions don't support jsonifying arrays
@api_experimental.route('/pools/<string:name>', methods=['GET'])
@requires_authentication
def get_pool(name):
"""Get pool by a given name."""
try:
pool = pool_api.get_pool(name=name)
except AirflowException as e:
_log.error(e)
response = jsonify(error="{}".format(e))
response.status_code = getattr(e, 'status', 500)
return response
else:
return jsonify(pool.to_json())
@api_experimental.route('/pools', methods=['GET'])
@requires_authentication
def get_pools():
"""Get all pools."""
try:
pools = pool_api.get_pools()
except AirflowException as e:
_log.error(e)
response = jsonify(error="{}".format(e))
response.status_code = getattr(e, 'status', 500)
return response
else:
return jsonify([p.to_json() for p in pools])
@csrf.exempt
@api_experimental.route('/pools', methods=['POST'])
@requires_authentication
def create_pool():
"""Create a pool."""
params = request.get_json(force=True)
try:
pool = pool_api.create_pool(**params)
except AirflowException as e:
_log.error(e)
response = jsonify(error="{}".format(e))
response.status_code = getattr(e, 'status', 500)
return response
else:
return jsonify(pool.to_json())
@csrf.exempt
@api_experimental.route('/pools/<string:name>', methods=['DELETE'])
@requires_authentication
def delete_pool(name):
"""Delete pool."""
try:
pool = pool_api.delete_pool(name=name)
except AirflowException as e:
_log.error(e)
response = jsonify(error="{}".format(e))
response.status_code = getattr(e, 'status', 500)
return response
else:
return jsonify(pool.to_json())
| apache-2.0 |
jinzo/django-dbpool-backend | django_dbpool_backends/mysql/creation.py | 311 | 3019 | from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated MySQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'integer AUTO_INCREMENT',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer UNSIGNED',
'PositiveSmallIntegerField': 'smallint UNSIGNED',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'longtext',
'TimeField': 'time',
}
def sql_table_creation_suffix(self):
suffix = []
if self.connection.settings_dict['TEST_CHARSET']:
suffix.append('CHARACTER SET %s' % self.connection.settings_dict['TEST_CHARSET'])
if self.connection.settings_dict['TEST_COLLATION']:
suffix.append('COLLATE %s' % self.connection.settings_dict['TEST_COLLATION'])
return ' '.join(suffix)
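# Illustrative result (assuming TEST_CHARSET='utf8' and
# TEST_COLLATION='utf8_general_ci'): 'CHARACTER SET utf8 COLLATE utf8_general_ci'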
def sql_for_inline_foreign_key_references(self, field, known_models, style):
"All inline references are pending under MySQL"
return [], True
def sql_for_inline_many_to_many_references(self, model, field, style):
from django.db import models
opts = model._meta
qn = self.connection.ops.quote_name
table_output = [
' %s %s %s,' %
(style.SQL_FIELD(qn(field.m2m_column_name())),
style.SQL_COLTYPE(models.ForeignKey(model).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL')),
' %s %s %s,' %
(style.SQL_FIELD(qn(field.m2m_reverse_name())),
style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL'))
]
deferred = [
(field.m2m_db_table(), field.m2m_column_name(), opts.db_table,
opts.pk.column),
(field.m2m_db_table(), field.m2m_reverse_name(),
field.rel.to._meta.db_table, field.rel.to._meta.pk.column)
]
return table_output, deferred
| bsd-3-clause |
tumbl3w33d/ansible | lib/ansible/modules/cloud/vmware/vmware_guest_disk.py | 9 | 34453 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_guest_disk
short_description: Manage disks related to virtual machine in given vCenter infrastructure
description:
- This module can be used to add, remove and update disks belonging to given virtual machine.
- All parameters and VMware object names are case sensitive.
- This module is destructive in nature, please read documentation carefully before proceeding.
- Be careful while removing disk specified as this may lead to data loss.
version_added: 2.8
author:
- Abhijeet Kasurde (@Akasurde) <[email protected]>
notes:
- Tested on vSphere 6.0 and 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
name:
description:
- Name of the virtual machine.
- This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
type: str
uuid:
description:
- UUID of the instance to gather facts if known, this is VMware's unique identifier.
- This is a required parameter, if parameter C(name) or C(moid) is not supplied.
type: str
moid:
description:
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
- This is required if C(name) or C(uuid) is not supplied.
version_added: '2.9'
type: str
folder:
description:
- Destination folder, absolute or relative path to find an existing guest.
- This is a required parameter, only if multiple VMs are found with same name.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
type: str
datacenter:
description:
- The datacenter name to which virtual machine belongs to.
required: True
type: str
use_instance_uuid:
description:
- Whether to use the VMware instance UUID rather than the BIOS UUID.
default: no
type: bool
version_added: '2.8'
disk:
description:
- A list of disks to add.
- The virtual disk related information is provided using this list.
- All values and parameters are case sensitive.
- 'Valid attributes are:'
- ' - C(size[_tb,_gb,_mb,_kb]) (integer): Disk storage size in specified unit.'
- ' If C(size) specified then unit must be specified. There is no space allowed in between size number and unit.'
- ' Only first occurrence in disk element will be considered, even if there are multiple size* parameters available.'
- ' - C(type) (string): Valid values are:'
- ' - C(thin) thin disk'
- ' - C(eagerzeroedthick) eagerzeroedthick disk'
- ' - C(thick) thick disk'
- ' Default: C(thick) thick disk, no eagerzero.'
- ' - C(disk_mode) (string): Type of disk mode. Valid values are:'
- ' - C(persistent) Changes are immediately and permanently written to the virtual disk. This is default.'
- ' - C(independent_persistent) Same as persistent, but not affected by snapshots.'
- ' - C(independent_nonpersistent) Changes to virtual disk are made to a redo log and discarded at power off, but not affected by snapshots.'
- ' - C(datastore) (string): Name of datastore or datastore cluster to be used for the disk.'
- ' - C(autoselect_datastore) (bool): Select the less used datastore. Specify only if C(datastore) is not specified.'
- ' - C(scsi_controller) (integer): SCSI controller number. Valid value range from 0 to 3.'
- ' Only 4 SCSI controllers are allowed per VM.'
- ' Care should be taken while specifying C(scsi_controller) is 0 and C(unit_number) as 0 as this disk may contain OS.'
- ' - C(unit_number) (integer): Disk Unit Number. Valid value range from 0 to 15. Only 15 disks are allowed per SCSI Controller.'
- ' - C(scsi_type) (string): Type of SCSI controller. This value is required only for the first occurrence of SCSI Controller.'
- ' This value is ignored, if SCSI Controller is already present or C(state) is C(absent).'
- ' Valid values are C(buslogic), C(lsilogic), C(lsilogicsas) and C(paravirtual).'
- ' C(paravirtual) is default value for this parameter.'
- ' - C(state) (string): State of disk. This is either "absent" or "present".'
- ' If C(state) is set to C(absent), disk will be removed permanently from virtual machine configuration and from VMware storage.'
- ' If C(state) is set to C(present), disk will be added if not present at given SCSI Controller and Unit Number.'
- ' If C(state) is set to C(present) and disk exists with different size, disk size is increased.'
- ' Reducing disk size is not allowed.'
default: []
type: list
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Add disks to virtual machine using UUID
vmware_guest_disk:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter_name }}"
validate_certs: no
uuid: 421e4592-c069-924d-ce20-7e7533fab926
disk:
- size_mb: 10
type: thin
datastore: datacluster0
state: present
scsi_controller: 1
unit_number: 1
scsi_type: 'paravirtual'
disk_mode: 'persistent'
- size_gb: 10
type: eagerzeroedthick
state: present
autoselect_datastore: True
scsi_controller: 2
scsi_type: 'buslogic'
unit_number: 12
disk_mode: 'independent_persistent'
- size: 10Gb
type: eagerzeroedthick
state: present
autoselect_datastore: True
scsi_controller: 2
scsi_type: 'buslogic'
unit_number: 1
disk_mode: 'independent_nonpersistent'
delegate_to: localhost
register: disk_facts
- name: Remove disks from virtual machine using name
vmware_guest_disk:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter_name }}"
validate_certs: no
name: VM_225
disk:
- state: absent
scsi_controller: 1
unit_number: 1
delegate_to: localhost
register: disk_facts
- name: Remove disks from virtual machine using moid
vmware_guest_disk:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter_name }}"
validate_certs: no
moid: vm-42
disk:
- state: absent
scsi_controller: 1
unit_number: 1
delegate_to: localhost
register: disk_facts
'''
RETURN = """
disk_status:
description: metadata about the virtual machine's disks after managing them
returned: always
type: dict
sample: {
"0": {
"backing_datastore": "datastore2",
"backing_disk_mode": "persistent",
"backing_eagerlyscrub": false,
"backing_filename": "[datastore2] VM_225/VM_225.vmdk",
"backing_thinprovisioned": false,
"backing_writethrough": false,
"capacity_in_bytes": 10485760,
"capacity_in_kb": 10240,
"controller_key": 1000,
"key": 2000,
"label": "Hard disk 1",
"summary": "10,240 KB",
"unit_number": 0
},
}
"""
import re
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task, find_obj, get_all_objs
class PyVmomiHelper(PyVmomi):
def __init__(self, module):
super(PyVmomiHelper, self).__init__(module)
self.desired_disks = self.params['disk'] # Match with vmware_guest parameter
self.vm = None
self.scsi_device_type = dict(lsilogic=vim.vm.device.VirtualLsiLogicController,
paravirtual=vim.vm.device.ParaVirtualSCSIController,
buslogic=vim.vm.device.VirtualBusLogicController,
lsilogicsas=vim.vm.device.VirtualLsiLogicSASController)
self.config_spec = vim.vm.ConfigSpec()
self.config_spec.deviceChange = []
def create_scsi_controller(self, scsi_type, scsi_bus_number):
"""
Create SCSI Controller with given SCSI Type and SCSI Bus Number
Args:
scsi_type: Type of SCSI
scsi_bus_number: SCSI Bus number to be assigned
Returns: Virtual device spec for SCSI Controller
"""
scsi_ctl = vim.vm.device.VirtualDeviceSpec()
scsi_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
scsi_ctl.device = self.scsi_device_type[scsi_type]()
scsi_ctl.device.unitNumber = 3
scsi_ctl.device.busNumber = scsi_bus_number
scsi_ctl.device.hotAddRemove = True
scsi_ctl.device.sharedBus = 'noSharing'
scsi_ctl.device.scsiCtlrUnitNumber = 7
return scsi_ctl
@staticmethod
def create_scsi_disk(scsi_ctl_key, disk_index, disk_mode):
"""
Create Virtual Device Spec for virtual disk
Args:
scsi_ctl_key: Unique SCSI Controller Key
disk_index: Disk unit number at which disk needs to be attached
Returns: Virtual Device Spec for virtual disk
"""
disk_spec = vim.vm.device.VirtualDeviceSpec()
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
disk_spec.device = vim.vm.device.VirtualDisk()
disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
disk_spec.device.backing.diskMode = disk_mode
disk_spec.device.controllerKey = scsi_ctl_key
disk_spec.device.unitNumber = disk_index
return disk_spec
def reconfigure_vm(self, config_spec, device_type):
"""
Reconfigure virtual machine after modifying device spec
Args:
config_spec: Config Spec
device_type: Type of device being modified
Returns: Boolean status 'changed' and actual task result
"""
changed, results = (False, '')
try:
# Perform actual VM reconfiguration
task = self.vm.ReconfigVM_Task(spec=config_spec)
changed, results = wait_for_task(task)
except vim.fault.InvalidDeviceSpec as invalid_device_spec:
self.module.fail_json(msg="Failed to manage %s on given virtual machine due to invalid"
" device spec : %s" % (device_type, to_native(invalid_device_spec.msg)),
details="Please check ESXi server logs for more details.")
except vim.fault.RestrictedVersion as e:
self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
" product versioning restrictions: %s" % to_native(e.msg))
return changed, results
def ensure_disks(self, vm_obj=None):
"""
Manage internal state of virtual machine disks
Args:
vm_obj: Managed object of virtual machine
"""
# Set vm object
self.vm = vm_obj
# Sanitize user input
disk_data = self.sanitize_disk_inputs()
# Create stateful information about SCSI devices
current_scsi_info = dict()
results = dict(changed=False, disk_data=None, disk_changes=dict())
# Deal with SCSI Controller
for device in vm_obj.config.hardware.device:
if isinstance(device, tuple(self.scsi_device_type.values())):
# Found SCSI device
if device.busNumber not in current_scsi_info:
device_bus_number = 1000 + device.busNumber
current_scsi_info[device_bus_number] = dict(disks=dict())
scsi_changed = False
for disk in disk_data:
scsi_controller = disk['scsi_controller'] + 1000
if scsi_controller not in current_scsi_info and disk['state'] == 'present':
scsi_ctl = self.create_scsi_controller(disk['scsi_type'], disk['scsi_controller'])
current_scsi_info[scsi_controller] = dict(disks=dict())
self.config_spec.deviceChange.append(scsi_ctl)
scsi_changed = True
if scsi_changed:
self.reconfigure_vm(self.config_spec, 'SCSI Controller')
self.config_spec = vim.vm.ConfigSpec()
self.config_spec.deviceChange = []
# Deal with Disks
for device in vm_obj.config.hardware.device:
if isinstance(device, vim.vm.device.VirtualDisk):
# Found Virtual Disk device
if device.controllerKey not in current_scsi_info:
current_scsi_info[device.controllerKey] = dict(disks=dict())
current_scsi_info[device.controllerKey]['disks'][device.unitNumber] = device
vm_name = self.vm.name
disk_change_list = []
for disk in disk_data:
disk_change = False
scsi_controller = disk['scsi_controller'] + 1000 # VMware auto assign 1000 + SCSI Controller
if disk['disk_unit_number'] not in current_scsi_info[scsi_controller]['disks'] and disk['state'] == 'present':
# Add new disk
disk_spec = self.create_scsi_disk(scsi_controller, disk['disk_unit_number'], disk['disk_mode'])
disk_spec.device.capacityInKB = disk['size']
if disk['disk_type'] == 'thin':
disk_spec.device.backing.thinProvisioned = True
elif disk['disk_type'] == 'eagerzeroedthick':
disk_spec.device.backing.eagerlyScrub = True
disk_spec.device.backing.fileName = "[%s] %s/%s_%s_%s.vmdk" % (disk['datastore'].name,
vm_name, vm_name,
str(scsi_controller),
str(disk['disk_unit_number']))
disk_spec.device.backing.datastore = disk['datastore']
self.config_spec.deviceChange.append(disk_spec)
disk_change = True
current_scsi_info[scsi_controller]['disks'][disk['disk_unit_number']] = disk_spec.device
results['disk_changes'][disk['disk_index']] = "Disk created."
elif disk['disk_unit_number'] in current_scsi_info[scsi_controller]['disks']:
if disk['state'] == 'present':
disk_spec = vim.vm.device.VirtualDeviceSpec()
# set the operation to edit so that it knows to keep other settings
disk_spec.device = current_scsi_info[scsi_controller]['disks'][disk['disk_unit_number']]
# Edit and no resizing allowed
if disk['size'] < disk_spec.device.capacityInKB:
self.module.fail_json(msg="Given disk size at disk index [%s] is smaller than found (%d < %d)."
" Reducing disks is not allowed." % (disk['disk_index'],
disk['size'],
disk_spec.device.capacityInKB))
if disk['size'] != disk_spec.device.capacityInKB:
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
disk_spec.device.capacityInKB = disk['size']
self.config_spec.deviceChange.append(disk_spec)
disk_change = True
results['disk_changes'][disk['disk_index']] = "Disk size increased."
else:
results['disk_changes'][disk['disk_index']] = "Disk already exists."
elif disk['state'] == 'absent':
# Disk already exists, deleting
disk_spec = vim.vm.device.VirtualDeviceSpec()
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.destroy
disk_spec.device = current_scsi_info[scsi_controller]['disks'][disk['disk_unit_number']]
self.config_spec.deviceChange.append(disk_spec)
disk_change = True
results['disk_changes'][disk['disk_index']] = "Disk deleted."
if disk_change:
# Adding multiple disks in a single attempt raises weird errors
# So adding single disk at a time.
self.reconfigure_vm(self.config_spec, 'disks')
self.config_spec = vim.vm.ConfigSpec()
self.config_spec.deviceChange = []
disk_change_list.append(disk_change)
if any(disk_change_list):
results['changed'] = True
results['disk_data'] = self.gather_disk_facts(vm_obj=self.vm)
self.module.exit_json(**results)
def sanitize_disk_inputs(self):
"""
Check correctness of disk input provided by user
Returns: A list of dictionary containing disk information
"""
disks_data = list()
if not self.desired_disks:
self.module.exit_json(changed=False, msg="No disks provided for virtual"
" machine '%s' for management." % self.vm.name)
for disk_index, disk in enumerate(self.desired_disks):
# Initialize default value for disk
current_disk = dict(disk_index=disk_index,
state='present',
datastore=None,
autoselect_datastore=True,
disk_unit_number=0,
scsi_controller=0,
disk_mode='persistent')
# Check state
if 'state' in disk:
if disk['state'] not in ['absent', 'present']:
self.module.fail_json(msg="Invalid state provided '%s' for disk index [%s]."
" State can be either - 'absent', 'present'" % (disk['state'],
disk_index))
else:
current_disk['state'] = disk['state']
if current_disk['state'] == 'present':
# Select datastore or datastore cluster
if 'datastore' in disk:
if 'autoselect_datastore' in disk:
self.module.fail_json(msg="Please specify either 'datastore' "
"or 'autoselect_datastore' for disk index [%s]" % disk_index)
# Check if given value is datastore or datastore cluster
datastore_name = disk['datastore']
datastore_cluster = find_obj(self.content, [vim.StoragePod], datastore_name)
if datastore_cluster:
# If user specified datastore cluster so get recommended datastore
datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
# Check if get_recommended_datastore or user specified datastore exists or not
datastore = find_obj(self.content, [vim.Datastore], datastore_name)
if datastore is None:
self.module.fail_json(msg="Failed to find datastore named '%s' "
"in given configuration." % disk['datastore'])
current_disk['datastore'] = datastore
current_disk['autoselect_datastore'] = False
elif 'autoselect_datastore' in disk:
# Find datastore which fits requirement
datastores = get_all_objs(self.content, [vim.Datastore])
if not datastores:
self.module.fail_json(msg="Failed to gather information about"
" available datastores in given datacenter.")
datastore = None
datastore_freespace = 0
for ds in datastores:
if ds.summary.freeSpace > datastore_freespace:
# If datastore field is provided, filter destination datastores
datastore = ds
datastore_freespace = ds.summary.freeSpace
current_disk['datastore'] = datastore
if 'datastore' not in disk and 'autoselect_datastore' not in disk:
self.module.fail_json(msg="Either 'datastore' or 'autoselect_datastore' is"
" required parameter while creating disk for "
"disk index [%s]." % disk_index)
if [x for x in disk.keys() if x.startswith('size_') or x == 'size']:
# size, size_tb, size_gb, size_mb, size_kb
disk_size_parse_failed = False
if 'size' in disk:
size_regex = re.compile(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])')
disk_size_m = size_regex.match(disk['size'])
if disk_size_m:
expected = disk_size_m.group(1)
unit = disk_size_m.group(2)
else:
disk_size_parse_failed = True
try:
if re.match(r'\d+\.\d+', expected):
# We found float value in string, let's typecast it
expected = float(expected)
else:
# We found int value in string, let's typecast it
expected = int(expected)
except (TypeError, ValueError, NameError):
disk_size_parse_failed = True
else:
# Even multiple size_ parameter provided by user,
# consider first value only
param = [x for x in disk.keys() if x.startswith('size_')][0]
unit = param.split('_')[-1]
disk_size = disk[param]
if isinstance(disk_size, (float, int)):
disk_size = str(disk_size)
try:
if re.match(r'\d+\.\d+', disk_size):
# We found float value in string, let's typecast it
expected = float(disk_size)
else:
# We found int value in string, let's typecast it
expected = int(disk_size)
except (TypeError, ValueError, NameError):
disk_size_parse_failed = True
if disk_size_parse_failed:
# Common failure
self.module.fail_json(msg="Failed to parse disk size for disk index [%s],"
" please review value provided"
" using documentation." % disk_index)
disk_units = dict(tb=3, gb=2, mb=1, kb=0)
unit = unit.lower()
if unit in disk_units:
current_disk['size'] = expected * (1024 ** disk_units[unit])
else:
self.module.fail_json(msg="%s is not a supported unit for disk size for disk index [%s]."
" Supported units are ['%s']." % (unit,
disk_index,
"', '".join(disk_units.keys())))
else:
# No size found but disk, fail
self.module.fail_json(msg="No size, size_kb, size_mb, size_gb or size_tb"
" attribute found into disk index [%s] configuration." % disk_index)
# Check SCSI controller key
if 'scsi_controller' in disk:
try:
temp_disk_controller = int(disk['scsi_controller'])
except ValueError:
self.module.fail_json(msg="Invalid SCSI controller ID '%s' specified"
" at index [%s]" % (disk['scsi_controller'], disk_index))
if temp_disk_controller not in range(0, 4):
# Only 4 SCSI controllers are allowed per VM
self.module.fail_json(msg="Invalid SCSI controller ID specified [%s],"
" please specify value between 0 to 3 only." % temp_disk_controller)
current_disk['scsi_controller'] = temp_disk_controller
else:
self.module.fail_json(msg="Please specify 'scsi_controller' under disk parameter"
" at index [%s], which is required while creating disk." % disk_index)
# Check for disk unit number
if 'unit_number' in disk:
try:
temp_disk_unit_number = int(disk['unit_number'])
except ValueError:
self.module.fail_json(msg="Invalid Disk unit number ID '%s'"
" specified at index [%s]" % (disk['unit_number'], disk_index))
if temp_disk_unit_number not in range(0, 16):
self.module.fail_json(msg="Invalid Disk unit number ID specified for disk [%s] at index [%s],"
" please specify value between 0 to 15"
" only (excluding 7)." % (temp_disk_unit_number, disk_index))
if temp_disk_unit_number == 7:
self.module.fail_json(msg="Invalid Disk unit number ID specified for disk at index [%s],"
" please specify value other than 7 as it is reserved"
"for SCSI Controller" % disk_index)
current_disk['disk_unit_number'] = temp_disk_unit_number
else:
self.module.fail_json(msg="Please specify 'unit_number' under disk parameter"
" at index [%s], which is required while creating disk." % disk_index)
# Type of Disk
disk_type = disk.get('type', 'thick').lower()
if disk_type not in ['thin', 'thick', 'eagerzeroedthick']:
self.module.fail_json(msg="Invalid 'disk_type' specified for disk index [%s]. Please specify"
" 'disk_type' value from ['thin', 'thick', 'eagerzeroedthick']." % disk_index)
current_disk['disk_type'] = disk_type
# Mode of Disk
temp_disk_mode = disk.get('disk_mode', 'persistent').lower()
if temp_disk_mode not in ['persistent', 'independent_persistent', 'independent_nonpersistent']:
self.module.fail_json(msg="Invalid 'disk_mode' specified for disk index [%s]. Please specify"
" 'disk_mode' value from ['persistent', 'independent_persistent', 'independent_nonpersistent']." % disk_index)
current_disk['disk_mode'] = temp_disk_mode
# SCSI Controller Type
scsi_contrl_type = disk.get('scsi_type', 'paravirtual').lower()
if scsi_contrl_type not in self.scsi_device_type.keys():
self.module.fail_json(msg="Invalid 'scsi_type' specified for disk index [%s]. Please specify"
" 'scsi_type' value from ['%s']" % (disk_index,
"', '".join(self.scsi_device_type.keys())))
current_disk['scsi_type'] = scsi_contrl_type
disks_data.append(current_disk)
return disks_data
def get_recommended_datastore(self, datastore_cluster_obj):
"""
Return Storage DRS recommended datastore from datastore cluster
Args:
datastore_cluster_obj: datastore cluster managed object
Returns: Name of recommended datastore from the given datastore cluster,
Returns None if no datastore recommendation found.
"""
# Check if Datastore Cluster provided by user is SDRS ready
sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled
if sdrs_status:
# We can get a storage recommendation only if SDRS is enabled on the given datastore cluster
pod_sel_spec = vim.storageDrs.PodSelectionSpec()
pod_sel_spec.storagePod = datastore_cluster_obj
storage_spec = vim.storageDrs.StoragePlacementSpec()
storage_spec.podSelectionSpec = pod_sel_spec
storage_spec.type = 'create'
try:
rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec)
rec_action = rec.recommendations[0].action[0]
return rec_action.destination.name
except Exception:
# There is some error so we fall back to general workflow
pass
datastore = None
datastore_freespace = 0
for ds in datastore_cluster_obj.childEntity:
if ds.summary.freeSpace > datastore_freespace:
# If datastore field is provided, filter destination datastores
datastore = ds
datastore_freespace = ds.summary.freeSpace
if datastore:
return datastore.name
return None
@staticmethod
def gather_disk_facts(vm_obj):
"""
Gather facts about VM's disks
Args:
vm_obj: Managed object of virtual machine
Returns: A list of dict containing disks information
"""
disks_facts = dict()
if vm_obj is None:
return disks_facts
disk_index = 0
for disk in vm_obj.config.hardware.device:
if isinstance(disk, vim.vm.device.VirtualDisk):
disks_facts[disk_index] = dict(
key=disk.key,
label=disk.deviceInfo.label,
summary=disk.deviceInfo.summary,
backing_filename=disk.backing.fileName,
backing_datastore=disk.backing.datastore.name,
backing_disk_mode=disk.backing.diskMode,
backing_writethrough=disk.backing.writeThrough,
backing_thinprovisioned=disk.backing.thinProvisioned,
backing_eagerlyscrub=bool(disk.backing.eagerlyScrub),
controller_key=disk.controllerKey,
unit_number=disk.unitNumber,
capacity_in_kb=disk.capacityInKB,
capacity_in_bytes=disk.capacityInBytes,
)
disk_index += 1
return disks_facts
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
name=dict(type='str'),
uuid=dict(type='str'),
moid=dict(type='str'),
folder=dict(type='str'),
datacenter=dict(type='str', required=True),
disk=dict(type='list', default=[]),
use_instance_uuid=dict(type='bool', default=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['name', 'uuid', 'moid']
]
)
if module.params['folder']:
# FindByInventoryPath() does not require an absolute path
# so we should leave the input folder path unmodified
module.params['folder'] = module.params['folder'].rstrip('/')
pyv = PyVmomiHelper(module)
# Check if the VM exists before continuing
vm = pyv.get_vm()
if not vm:
# We unable to find the virtual machine user specified
# Bail out
vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid'))
module.fail_json(msg="Unable to manage disks for non-existing"
" virtual machine '%s'." % vm_id)
# VM exists
try:
pyv.ensure_disks(vm_obj=vm)
except Exception as exc:
module.fail_json(msg="Failed to manage disks for virtual machine"
" '%s' with exception : %s" % (vm.name,
to_native(exc)))
if __name__ == '__main__':
main()
| gpl-3.0 |
ddayguerrero/blogme | flask/lib/python3.4/site-packages/pip/index.py | 45 | 40374 | """Routines related to PyPI, indexes"""
import sys
import os
import re
import mimetypes
import posixpath
from pip.log import logger
from pip.util import Inf, normalize_name, splitext, is_prerelease
from pip.exceptions import (DistributionNotFound, BestVersionAlreadyInstalled,
InstallationError, InvalidWheelFilename, UnsupportedWheel)
from pip.backwardcompat import urlparse, url2pathname
from pip.download import PipSession, url_to_path, path_to_url
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
import html5lib, requests, pkg_resources
from requests.exceptions import SSLError
__all__ = ['PackageFinder']
DEFAULT_MIRROR_HOSTNAME = "last.pypi.python.org"
INSECURE_SCHEMES = {
"http": ["https"],
}
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links
"""
def __init__(self, find_links, index_urls,
use_wheel=True, allow_external=[], allow_unverified=[],
allow_all_external=False, allow_all_prereleases=False,
process_dependency_links=False, session=None):
self.find_links = find_links
self.index_urls = index_urls
self.dependency_links = []
self.cache = PageCache()
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.use_wheel = use_wheel
# Do we allow (safe and verifiable) externally hosted files?
self.allow_external = set(normalize_name(n) for n in allow_external)
# Which names are allowed to install insecure and unverifiable files?
self.allow_unverified = set(
normalize_name(n) for n in allow_unverified
)
# Anything that is allowed unverified is also allowed external
self.allow_external |= self.allow_unverified
# Do we allow all (safe and verifiable) externally hosted files?
self.allow_all_external = allow_all_external
# Stores if we ignored any external links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_external = False
# Stores if we ignored any unsafe links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_unverified = False
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
self._have_warned_dependency_links = False
# The Session we'll use to make requests
self.session = session or PipSession()
def add_dependency_links(self, links):
        ## FIXME: this shouldn't be a global list; it should only
## apply to requirements of the package that specifies the
## dependency_links value
## FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
if not self._have_warned_dependency_links:
logger.deprecated(
"1.6",
"Dependency Links processing has been deprecated with an "
"accelerated time schedule and will be removed in pip 1.6",
)
self._have_warned_dependency_links = True
self.dependency_links.extend(links)
def _sort_locations(self, locations):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
is_find_link = url in self.find_links
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if is_find_link and os.path.isdir(path):
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url and os.path.isdir(path):
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls
def _link_sort_key(self, link_tuple):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
        with the same version would have to be considered equal
"""
parsed_version, link, _ = link_tuple
if self.use_wheel:
support_num = len(supported_tags)
if link == INSTALLED_VERSION:
pri = 1
elif link.ext == wheel_ext:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
if not wheel.supported():
raise UnsupportedWheel("%s is not a supported wheel for this platform. It can't be sorted." % wheel.filename)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (parsed_version, pri)
else:
return parsed_version
def _sort_versions(self, applicable_versions):
"""
Bring the latest version (and wheels) to the front, but maintain the existing ordering as secondary.
See the docstring for `_link_sort_key` for details.
This function is isolated for easier unit testing.
"""
return sorted(applicable_versions, key=self._link_sort_key, reverse=True)
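    # Illustrative note (sketch, not part of upstream pip): with use_wheel=True
    # and equal versions, _link_sort_key ranks an existing install (pri=1) above
    # a supported wheel (pri=-Wheel.support_index_min()) above an sdist
    # (pri=-len(supported_tags)), so sorting with reverse=True puts the
    # installed version first.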
def find_requirement(self, req, upgrade):
def mkurl_pypi_url(url):
loc = posixpath.join(url, url_name)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
url_name = req.url_name
# Only check main index if index URL is given:
main_index_url = None
if self.index_urls:
# Check that we have the url_name correctly spelled:
main_index_url = Link(mkurl_pypi_url(self.index_urls[0]), trusted=True)
# This will also cache the page, so it's okay that we get it again later:
page = self._get_page(main_index_url, req)
if page is None:
url_name = self._find_url_name(Link(self.index_urls[0], trusted=True), url_name, req) or req.url_name
if url_name is not None:
locations = [
mkurl_pypi_url(url)
for url in self.index_urls] + self.find_links
else:
locations = list(self.find_links)
for version in req.absolute_versions:
if url_name is not None and main_index_url is not None:
locations = [
posixpath.join(main_index_url.url, version)] + locations
file_locations, url_locations = self._sort_locations(locations)
_flocations, _ulocations = self._sort_locations(self.dependency_links)
file_locations.extend(_flocations)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
locations = [Link(url, trusted=True) for url in url_locations]
# We explicitly do not trust links that came from dependency_links
locations.extend([Link(url) for url in _ulocations])
logger.debug('URLs to search for versions for %s:' % req)
for location in locations:
logger.debug('* %s' % location)
# Determine if this url used a secure transport mechanism
parsed = urlparse.urlparse(str(location))
if parsed.scheme in INSECURE_SCHEMES:
secure_schemes = INSECURE_SCHEMES[parsed.scheme]
if len(secure_schemes) == 1:
ctx = (location, parsed.scheme, secure_schemes[0],
parsed.netloc)
logger.warn("%s uses an insecure transport scheme (%s). "
"Consider using %s if %s has it available" %
ctx)
elif len(secure_schemes) > 1:
ctx = (location, parsed.scheme, ", ".join(secure_schemes),
parsed.netloc)
logger.warn("%s uses an insecure transport scheme (%s). "
"Consider using one of %s if %s has any of "
"them available" % ctx)
else:
ctx = (location, parsed.scheme)
logger.warn("%s uses an insecure transport scheme (%s)." %
ctx)
found_versions = []
found_versions.extend(
self._package_versions(
# We trust every directly linked archive in find_links
[Link(url, '-f', trusted=True) for url in self.find_links], req.name.lower()))
page_versions = []
for page in self._get_pages(locations, req):
logger.debug('Analyzing links from page %s' % page.url)
logger.indent += 2
try:
page_versions.extend(self._package_versions(page.links, req.name.lower()))
finally:
logger.indent -= 2
dependency_versions = list(self._package_versions(
[Link(url) for url in self.dependency_links], req.name.lower()))
if dependency_versions:
logger.info('dependency_links found: %s' % ', '.join([link.url for parsed, link, version in dependency_versions]))
file_versions = list(self._package_versions(
[Link(url) for url in file_locations], req.name.lower()))
if not found_versions and not page_versions and not dependency_versions and not file_versions:
logger.fatal('Could not find any downloads that satisfy the requirement %s' % req)
if self.need_warn_external:
logger.warn("Some externally hosted files were ignored (use "
"--allow-external %s to allow)." % req.name)
if self.need_warn_unverified:
logger.warn("Some insecure and unverifiable files were ignored"
" (use --allow-unverified %s to allow)." %
req.name)
raise DistributionNotFound('No distributions at all found for %s' % req)
installed_version = []
if req.satisfied_by is not None:
installed_version = [(req.satisfied_by.parsed_version, INSTALLED_VERSION, req.satisfied_by.version)]
if file_versions:
file_versions.sort(reverse=True)
logger.info('Local files found: %s' % ', '.join([url_to_path(link.url) for parsed, link, version in file_versions]))
#this is an intentional priority ordering
all_versions = installed_version + file_versions + found_versions + page_versions + dependency_versions
applicable_versions = []
for (parsed_version, link, version) in all_versions:
if version not in req.req:
logger.info("Ignoring link %s, version %s doesn't match %s"
% (link, version, ','.join([''.join(s) for s in req.req.specs])))
continue
elif is_prerelease(version) and not (self.allow_all_prereleases or req.prereleases):
# If this version isn't the already installed one, then
# ignore it if it's a pre-release.
if link is not INSTALLED_VERSION:
logger.info("Ignoring link %s, version %s is a pre-release (use --pre to allow)." % (link, version))
continue
applicable_versions.append((parsed_version, link, version))
applicable_versions = self._sort_versions(applicable_versions)
existing_applicable = bool([link for parsed_version, link, version in applicable_versions if link is INSTALLED_VERSION])
if not upgrade and existing_applicable:
if applicable_versions[0][1] is INSTALLED_VERSION:
logger.info('Existing installed version (%s) is most up-to-date and satisfies requirement'
% req.satisfied_by.version)
else:
logger.info('Existing installed version (%s) satisfies requirement (most up-to-date version is %s)'
% (req.satisfied_by.version, applicable_versions[0][2]))
return None
if not applicable_versions:
logger.fatal('Could not find a version that satisfies the requirement %s (from versions: %s)'
% (req, ', '.join([version for parsed_version, link, version in all_versions])))
if self.need_warn_external:
logger.warn("Some externally hosted files were ignored (use "
"--allow-external to allow).")
if self.need_warn_unverified:
logger.warn("Some insecure and unverifiable files were ignored"
" (use --allow-unverified %s to allow)." %
req.name)
raise DistributionNotFound('No distributions matching the version for %s' % req)
if applicable_versions[0][1] is INSTALLED_VERSION:
            # We have an existing version, and it's the best version
logger.info('Installed version (%s) is most up-to-date (past versions: %s)'
% (req.satisfied_by.version, ', '.join([version for parsed_version, link, version in applicable_versions[1:]]) or 'none'))
raise BestVersionAlreadyInstalled
if len(applicable_versions) > 1:
logger.info('Using version %s (newest of versions: %s)' %
(applicable_versions[0][2], ', '.join([version for parsed_version, link, version in applicable_versions])))
selected_version = applicable_versions[0][1]
if (selected_version.internal is not None
and not selected_version.internal):
logger.warn("%s an externally hosted file and may be "
"unreliable" % req.name)
if (selected_version.verifiable is not None
and not selected_version.verifiable):
logger.warn("%s is potentially insecure and "
"unverifiable." % req.name)
if selected_version._deprecated_regex:
logger.deprecated(
"1.7",
"%s discovered using a deprecated method of parsing, "
"in the future it will no longer be discovered" % req.name
)
return selected_version
def _find_url_name(self, index_url, url_name, req):
"""Finds the true URL name of a package, when the given name isn't quite correct.
This is usually used to implement case-insensitivity."""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
## FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url, req)
if page is None:
logger.fatal('Cannot fetch index base URL %s' % index_url)
return
norm_name = normalize_name(req.url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.notify('Real name of requirement %s is %s' % (url_name, base))
return base
return None
def _get_pages(self, locations, req):
"""
        Yields HTMLPage objects from the given locations, skipping
        locations that have errors, and queueing download/homepage rel links
"""
all_locations = list(locations)
seen = set()
while all_locations:
location = all_locations.pop(0)
if location in seen:
continue
seen.add(location)
page = self._get_page(location, req)
if page is None:
continue
yield page
for link in page.rel_links():
normalized = normalize_name(req.name).lower()
if (not normalized in self.allow_external
and not self.allow_all_external):
self.need_warn_external = True
logger.debug("Not searching %s for files because external "
"urls are disallowed." % link)
continue
if (link.trusted is not None
and not link.trusted
and not normalized in self.allow_unverified):
logger.debug("Not searching %s for urls, it is an "
"untrusted link and cannot produce safe or "
"verifiable files." % link)
self.need_warn_unverified = True
continue
all_locations.append(link)
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.-]+)', re.I)
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates"
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search_name):
for link in self._sort_links(links):
for v in self._link_package_versions(link, search_name):
yield v
def _known_extensions(self):
extensions = ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip')
if self.use_wheel:
return extensions + (wheel_ext,)
return extensions
def _link_package_versions(self, link, search_name):
"""
Return an iterable of triples (pkg_resources_version_key,
link, python_version) that can be extracted from the given
link.
Meant to be overridden by subclasses, not called by clients.
"""
platform = get_platform()
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
else:
egg_info, ext = link.splitext()
if not ext:
if link not in self.logged_links:
logger.debug('Skipping link %s; not a file' % link)
self.logged_links.add(link)
return []
if egg_info.endswith('.tar'):
# Special double-extension case:
egg_info = egg_info[:-4]
ext = '.tar' + ext
if ext not in self._known_extensions():
if link not in self.logged_links:
logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext))
self.logged_links.add(link)
return []
if "macosx10" in link.path and ext == '.zip':
if link not in self.logged_links:
logger.debug('Skipping link %s; macosx10 one' % (link))
self.logged_links.add(link)
return []
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
logger.debug('Skipping %s because the wheel filename is invalid' % link)
return []
if wheel.name.lower() != search_name.lower():
logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
return []
if not wheel.supported():
logger.debug('Skipping %s because it is not compatible with this Python' % link)
return []
# This is a dirty hack to prevent installing Binary Wheels from
# PyPI unless it is a Windows or Mac Binary Wheel. This is
# paired with a change to PyPI disabling uploads for the
# same. Once we have a mechanism for enabling support for binary
# wheels on linux that deals with the inherent problems of
# binary distribution this can be removed.
comes_from = getattr(link, "comes_from", None)
if ((
not platform.startswith('win')
and not platform.startswith('macosx')
)
and comes_from is not None
and urlparse.urlparse(comes_from.url).netloc.endswith(
"pypi.python.org")):
if not wheel.supported(tags=supported_tags_noarch):
logger.debug(
"Skipping %s because it is a pypi-hosted binary "
"Wheel on an unsupported platform" % link
)
return []
version = wheel.version
if not version:
version = self._egg_info_matches(egg_info, search_name, link)
if version is None:
logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
return []
if (link.internal is not None
and not link.internal
and not normalize_name(search_name).lower() in self.allow_external
and not self.allow_all_external):
# We have a link that we are sure is external, so we should skip
# it unless we are allowing externals
logger.debug("Skipping %s because it is externally hosted." % link)
self.need_warn_external = True
return []
if (link.verifiable is not None
and not link.verifiable
and not (normalize_name(search_name).lower()
in self.allow_unverified)):
            # We have a link that we are sure we cannot verify its integrity,
# so we should skip it unless we are allowing unsafe installs
# for this requirement.
logger.debug("Skipping %s because it is an insecure and "
"unverifiable file." % link)
self.need_warn_unverified = True
return []
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
logger.debug('Skipping %s because Python version is incorrect' % link)
return []
logger.debug('Found link %s, version: %s' % (link, version))
return [(pkg_resources.parse_version(version),
link,
version)]
def _egg_info_matches(self, egg_info, search_name, link):
match = self._egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s' % link)
return None
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
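    # Illustrative example of _egg_info_matches (hypothetical filename): for
    # egg_info 'pip-1.5.6' and search_name 'pip', the regex matches 'pip-1.5.6',
    # look_for is 'pip-', and the returned version string is '1.5.6'.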
def _get_page(self, link, req):
return HTMLPage.get_page(link, req,
cache=self.cache,
session=self.session,
)
class PageCache(object):
"""Cache of HTML pages"""
failure_limit = 3
def __init__(self):
self._failures = {}
self._pages = {}
self._archives = {}
def too_many_failures(self, url):
return self._failures.get(url, 0) >= self.failure_limit
def get_page(self, url):
return self._pages.get(url)
def is_archive(self, url):
return self._archives.get(url, False)
def set_is_archive(self, url, value=True):
self._archives[url] = value
def add_page_failure(self, url, level):
self._failures[url] = self._failures.get(url, 0)+level
def add_page(self, urls, page):
for url in urls:
self._pages[url] = page
class HTMLPage(object):
"""Represents one page, along with its URL"""
## FIXME: these regexes are horrible hacks:
_homepage_re = re.compile(r'<th>\s*home\s*page', re.I)
_download_re = re.compile(r'<th>\s*download\s+url', re.I)
_href_re = re.compile('href=(?:"([^"]*)"|\'([^\']*)\'|([^>\\s\\n]*))', re.I|re.S)
def __init__(self, content, url, headers=None, trusted=None):
self.content = content
self.parsed = html5lib.parse(self.content, namespaceHTMLElements=False)
self.url = url
self.headers = headers
self.trusted = trusted
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, req, cache=None, skip_archives=True, session=None):
if session is None:
session = PipSession()
url = link.url
url = url.split('#', 1)[0]
if cache.too_many_failures(url):
return None
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %(scheme)s URL %(link)s' % locals())
return None
if cache is not None:
inst = cache.get_page(url)
if inst is not None:
return inst
try:
if skip_archives:
if cache is not None:
if cache.is_archive(url):
return None
filename = link.filename
for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(url,
session=session,
)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug('Skipping page %s because of Content-Type: %s' % (link, content_type))
if cache is not None:
cache.set_is_archive(url)
return None
logger.debug('Getting page %s' % url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
# add trailing slash if not present so urljoin doesn't trim final segment
if not url.endswith('/'):
url += '/'
url = urlparse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s' % url)
resp = session.get(url, headers={"Accept": "text/html"})
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement. For instance http://sourceforge.net/projects/docutils/files/docutils/0.8.1/docutils-0.8.1.tar.gz/download
# redirects to http://superb-dca3.dl.sourceforge.net/project/docutils/docutils/0.8.1/docutils-0.8.1.tar.gz
# Unless we issue a HEAD request on every url we cannot know
# ahead of time for sure if something is HTML or not. However we
# can check after we've downloaded it.
content_type = resp.headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug('Skipping page %s because of Content-Type: %s' %
(link, content_type))
if cache is not None:
cache.set_is_archive(url)
return None
inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
except requests.HTTPError as exc:
level = 2 if exc.response.status_code == 404 else 1
cls._handle_fail(req, link, exc, url, cache=cache, level=level)
except requests.ConnectionError as exc:
cls._handle_fail(
req, link, "connection error: %s" % exc, url,
cache=cache,
)
except requests.Timeout:
cls._handle_fail(req, link, "timed out", url, cache=cache)
except SSLError as exc:
reason = ("There was a problem confirming the ssl certificate: "
"%s" % exc)
cls._handle_fail(req, link, reason, url,
cache=cache,
level=2,
meth=logger.notify,
)
else:
if cache is not None:
cache.add_page([url, resp.url], inst)
return inst
@staticmethod
def _handle_fail(req, link, reason, url, cache=None, level=1, meth=None):
if meth is None:
meth = logger.info
meth("Could not fetch URL %s: %s", link, reason)
meth("Will skip URL %s when looking for download links for %s" %
(link.url, req))
if cache is not None:
cache.add_page_failure(url, level)
@staticmethod
def _get_content_type(url, session=None):
"""Get the Content-Type of the given url, using a HEAD request"""
if session is None:
session = PipSession()
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
if not scheme in ('http', 'https', 'ftp', 'ftps'):
## FIXME: some warning or something?
## assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "")
@property
def api_version(self):
if not hasattr(self, "_api_version"):
_api_version = None
metas = [x for x in self.parsed.findall(".//meta")
if x.get("name", "").lower() == "api-version"]
if metas:
try:
_api_version = int(metas[0].get("value", None))
except (TypeError, ValueError):
_api_version = None
self._api_version = _api_version
return self._api_version
@property
def base_url(self):
if not hasattr(self, "_base_url"):
base = self.parsed.find(".//base")
if base is not None and base.get("href"):
self._base_url = base.get("href")
else:
self._base_url = self.url
return self._base_url
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(urlparse.urljoin(self.base_url, href))
# Determine if this link is internal. If that distinction
# doesn't make sense in this context, then we don't make
# any distinction.
internal = None
if self.api_version and self.api_version >= 2:
# Only api_versions >= 2 have a distinction between
# external and internal links
internal = bool(anchor.get("rel")
and "internal" in anchor.get("rel").split())
yield Link(url, self, internal=internal)
def rel_links(self):
for url in self.explicit_rel_links():
yield url
for url in self.scraped_rel_links():
yield url
def explicit_rel_links(self, rels=('homepage', 'download')):
"""Yields all links with the given relations"""
rels = set(rels)
for anchor in self.parsed.findall(".//a"):
if anchor.get("rel") and anchor.get("href"):
found_rels = set(anchor.get("rel").split())
# Determine the intersection between what rels were found and
# what rels were being looked for
if found_rels & rels:
href = anchor.get("href")
url = self.clean_link(urlparse.urljoin(self.base_url, href))
yield Link(url, self, trusted=False)
def scraped_rel_links(self):
# Can we get rid of this horrible horrible method?
for regex in (self._homepage_re, self._download_re):
match = regex.search(self.content)
if not match:
continue
href_match = self._href_re.search(self.content, pos=match.end())
if not href_match:
continue
url = href_match.group(1) or href_match.group(2) or href_match.group(3)
if not url:
continue
url = self.clean_link(urlparse.urljoin(self.base_url, url))
yield Link(url, self, trusted=False, _deprecated_regex=True)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
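    # Illustrative example of clean_link (hypothetical URL): a link such as
    # 'http://host/some file.tar.gz' would be rewritten to
    # 'http://host/some%20file.tar.gz'; only the space falls outside _clean_re.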
class Link(object):
def __init__(self, url, comes_from=None, internal=None, trusted=None,
_deprecated_regex=False):
self.url = url
self.comes_from = comes_from
self.internal = internal
self.trusted = trusted
self._deprecated_regex = _deprecated_regex
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
return self.url == other.url
def __ne__(self, other):
return self.url != other.url
def __lt__(self, other):
return self.url < other.url
def __le__(self, other):
return self.url <= other.url
def __gt__(self, other):
return self.url > other.url
def __ge__(self, other):
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urlparse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urlparse.urlsplit(self.url)[0]
@property
def path(self):
return urlparse.urlsplit(self.url)[2]
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url)
return urlparse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)')
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def verifiable(self):
"""
Returns True if this link can be verified after download, False if it
cannot, and None if we cannot determine.
"""
trusted = self.trusted or getattr(self.comes_from, "trusted", None)
if trusted is not None and trusted:
# This link came from a trusted source. It *may* be verifiable but
# first we need to see if this page is operating under the new
# API version.
try:
api_version = getattr(self.comes_from, "api_version", None)
api_version = int(api_version)
except (ValueError, TypeError):
api_version = None
if api_version is None or api_version <= 1:
                # This link is either trusted, or it came from a trusted
                #   source; however it is not operating under API version 2,
                #   so we can't make any claims about whether it's safe or not
return
if self.hash:
# This link came from a trusted source and it has a hash, so we
# can consider it safe.
return True
else:
# This link came from a trusted source, using the new API
# version, and it does not have a hash. It is NOT verifiable
return False
elif trusted is not None:
# This link came from an untrusted source and we cannot trust it
return False
# An object to represent the "link" for the installed version of a requirement.
# Using Inf as the url makes it sort higher.
INSTALLED_VERSION = Link(Inf)
def get_requirement_from_url(url):
"""Get a requirement from the URL, if possible. This looks for #egg
in the URL"""
link = Link(url)
egg_info = link.egg_fragment
if not egg_info:
egg_info = splitext(link.filename)[0]
return package_to_requirement(egg_info)
def package_to_requirement(package_name):
"""Translate a name like Foo-1.2 to Foo==1.3"""
match = re.search(r'^(.*?)-(dev|\d.*)', package_name)
if match:
name = match.group(1)
version = match.group(2)
else:
name = package_name
version = ''
if version:
return '%s==%s' % (name, version)
else:
return name
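# Illustrative examples (hypothetical names):
#   package_to_requirement('Foo-1.2')  -> 'Foo==1.2'
#   package_to_requirement('Foo-dev')  -> 'Foo==dev'
#   package_to_requirement('Foo')      -> 'Foo'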
| mit |
jontrulson/upm | examples/python/aeotecdsb09104.py | 7 | 2756 | #!/usr/bin/python
# Author: Jon Trulson <[email protected]>
# Copyright (c) 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_ozw as sensorObj
def main():
# This function lets you run code on exit
def exitHandler():
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
defaultDev = "/dev/ttyACM0"
if (len(sys.argv) > 1):
defaultDev = sys.argv[1]
print("Using device", defaultDev)
# Instantiate an Aeotec DSB09104 instance, on device node 12. You
# will almost certainly need to change this to reflect your own
# network. Use the ozwdump example to see what nodes are available.
sensor = sensorObj.AeotecDSB09104(12)
# The first thing to do is create options, then lock them when done.
sensor.optionsCreate()
sensor.optionsLock()
# Next, initialize it.
print("Initializing, this may take awhile depending on your ZWave network")
sensor.init(defaultDev)
print("Initialization complete")
print("Querying data...")
while (True):
sensor.update()
print("Watts, Channel 1: %0.03f W" % sensor.getWattsC1())
print("Watts, Channel 2: %0.03f W" % sensor.getWattsC2())
print("Watts, Channel 3: %0.03f W" % sensor.getWattsC3())
print("Energy, Channel 1: %0.03f kWh" % sensor.getEnergyC1())
print("Energy, Channel 2: %0.03f kWh" % sensor.getEnergyC2())
print("Energy, Channel 3: %0.03f kWh" % sensor.getEnergyC3())
print("Battery Level: %d\n" % sensor.getBatteryLevel())
time.sleep(3)
if __name__ == '__main__':
main()
| mit |
dshen1/trading-with-python | lib/functions.py | 76 | 11627 | # -*- coding: utf-8 -*-
"""
twp support functions
@author: Jev Kuznetsov
Licence: GPL v2
"""
from scipy import polyfit, polyval
import datetime as dt
#from datetime import datetime, date
from pandas import DataFrame, Index, Series
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def nans(shape, dtype=float):
''' create a nan numpy array '''
a = np.empty(shape, dtype)
a.fill(np.nan)
return a
def plotCorrelationMatrix(price, thresh = None):
''' plot a correlation matrix as a heatmap image
inputs:
price: prices DataFrame
thresh: correlation threshold to use for checking, default None
'''
symbols = price.columns.tolist()
R = price.pct_change()
correlationMatrix = R.corr()
if thresh is not None:
correlationMatrix = correlationMatrix > thresh
plt.imshow(abs(correlationMatrix.values),interpolation='none')
plt.xticks(range(len(symbols)),symbols)
plt.yticks(range(len(symbols)),symbols)
plt.colorbar()
plt.title('Correlation matrix')
return correlationMatrix
def pca(A):
""" performs principal components analysis
(PCA) on the n-by-p DataFrame A
Rows of A correspond to observations, columns to variables.
Returns :
coeff : principal components, column-wise
transform: A in principal component space
latent : eigenvalues
"""
# computing eigenvalues and eigenvectors of covariance matrix
M = (A - A.mean()).T # subtract the mean (along columns)
[latent,coeff] = np.linalg.eig(np.cov(M)) # attention:not always sorted
idx = np.argsort(latent) # sort eigenvalues
    idx = idx[::-1] # in descending order
coeff = coeff[:,idx]
latent = latent[idx]
score = np.dot(coeff.T,A.T) # projection of the data in the new space
transform = DataFrame(index = A.index, data = score.T)
return coeff,transform,latent
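# Illustrative usage sketch (hypothetical data, not from the library):
#   A = DataFrame(np.random.randn(250, 4), columns=list('abcd'))
#   coeff, transform, latent = pca(A)   # latent is sorted in descending order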
def pos2pnl(price,position , ibTransactionCost=False ):
"""
calculate pnl based on price and position
Inputs:
---------
price: series or dataframe of price
position: number of shares at each time. Column names must be same as in price
ibTransactionCost: use bundled Interactive Brokers transaction cost of 0.005$/share
Returns a portfolio DataFrame
"""
delta=position.diff()
port = DataFrame(index=price.index)
if isinstance(price,Series): # no need to sum along 1 for series
port['cash'] = (-delta*price).cumsum()
port['stock'] = (position*price)
else: # dealing with DataFrame here
port['cash'] = (-delta*price).sum(axis=1).cumsum()
port['stock'] = (position*price).sum(axis=1)
if ibTransactionCost:
tc = -0.005*position.diff().abs() # basic transaction cost
tc[(tc>-1) & (tc<0)] = -1 # everything under 1$ will be ceil'd to 1$
if isinstance(price,DataFrame):
tc = tc.sum(axis=1)
port['tc'] = tc.cumsum()
else:
port['tc'] = 0.
port['total'] = port['stock']+port['cash']+port['tc']
return port
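# Illustrative usage sketch (hypothetical series; column names as returned above):
#   price = pd.Series([10.0, 10.5, 10.2])
#   position = pd.Series([0, 100, 100])   # shares held at each bar
#   port = pos2pnl(price, position)       # columns: cash, stock, tc, total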
def tradeBracket(price,entryBar,maxTradeLength,bracket):
'''
trade a symmetrical bracket on price series, return price delta and exit bar #
Input
------
price : series of price values
entryBar: entry bar number
maxTradeLength : max trade duration in bars
bracket : allowed price deviation
'''
lastBar = min(entryBar+maxTradeLength,len(price)-1)
p = price[entryBar:lastBar]-price[entryBar]
idxOutOfBound = np.nonzero(abs(p)>bracket) # find indices where price comes out of bracket
if idxOutOfBound[0].any(): # found match
priceDelta = p[idxOutOfBound[0][0]]
exitBar = idxOutOfBound[0][0]+entryBar
else: # all in bracket, exiting based on time
priceDelta = p[-1]
exitBar = lastBar
return priceDelta, exitBar
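# Illustrative usage sketch (hypothetical price array):
#   p = np.array([100.0, 100.5, 101.2, 99.4])
#   delta, exitBar = tradeBracket(p, entryBar=0, maxTradeLength=3, bracket=1.0)
#   # exits at bar 2, where |101.2 - 100.0| first exceeds the 1.0 bracket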
def estimateBeta(priceY,priceX,algo = 'standard'):
'''
estimate stock Y vs stock X beta using iterative linear
regression. Outliers outside 3 sigma boundary are filtered out
Parameters
--------
priceX : price series of x (usually market)
priceY : price series of y (estimate beta of this price)
Returns
--------
beta : stockY beta relative to stock X
'''
X = DataFrame({'x':priceX,'y':priceY})
if algo=='returns':
ret = (X/X.shift(1)-1).dropna().values
#print len(ret)
x = ret[:,0]
y = ret[:,1]
# filter high values
low = np.percentile(x,20)
high = np.percentile(x,80)
iValid = (x>low) & (x<high)
x = x[iValid]
y = y[iValid]
iteration = 1
nrOutliers = 1
while iteration < 10 and nrOutliers > 0 :
(a,b) = polyfit(x,y,1)
yf = polyval([a,b],x)
#plot(x,y,'x',x,yf,'r-')
err = yf-y
idxOutlier = abs(err) > 3*np.std(err)
nrOutliers =sum(idxOutlier)
beta = a
#print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
x = x[~idxOutlier]
y = y[~idxOutlier]
iteration += 1
elif algo=='log':
x = np.log(X['x'])
y = np.log(X['y'])
(a,b) = polyfit(x,y,1)
beta = a
elif algo=='standard':
ret =np.log(X).diff().dropna()
beta = ret['x'].cov(ret['y'])/ret['x'].var()
else:
raise TypeError("unknown algorithm type, use 'standard', 'log' or 'returns'")
return beta
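# Illustrative usage sketch (hypothetical series; priceX is the market/benchmark):
#   beta = estimateBeta(priceY=stock_close, priceX=index_close, algo='standard')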
def estimateVolatility(ohlc, N=10, algo='YangZhang'):
"""
Volatility estimation
Possible algorithms: ['YangZhang', 'CC']
"""
cc = np.log(ohlc.close/ohlc.close.shift(1))
if algo == 'YangZhang': # Yang-zhang volatility
ho = np.log(ohlc.high/ohlc.open)
lo = np.log(ohlc.low/ohlc.open)
co = np.log(ohlc.close/ohlc.open)
oc = np.log(ohlc.open/ohlc.close.shift(1))
oc_sq = oc**2
cc_sq = cc**2
rs = ho*(ho-co)+lo*(lo-co)
close_vol = pd.rolling_sum(cc_sq, window=N) * (1.0 / (N - 1.0))
open_vol = pd.rolling_sum(oc_sq, window=N) * (1.0 / (N - 1.0))
window_rs = pd.rolling_sum(rs, window=N) * (1.0 / (N - 1.0))
result = (open_vol + 0.164333 * close_vol + 0.835667 * window_rs).apply(np.sqrt) * np.sqrt(252)
result[:N-1] = np.nan
elif algo == 'CC': # standard close-close estimator
result = np.sqrt(252)*np.sqrt(((pd.rolling_sum(cc**2,N))/N))
else:
raise ValueError('Unknown algo type.')
return result*100
def rank(current,past):
''' calculate a relative rank 0..1 for a value against series '''
return (current>past).sum()/float(past.count())
def returns(df):
return (df/df.shift(1)-1)
def logReturns(df):
t = np.log(df)
return t-t.shift(1)
def dateTimeToDate(idx):
''' convert datetime index to date '''
dates = []
for dtm in idx:
dates.append(dtm.date())
return dates
def readBiggerScreener(fName):
''' import data from Bigger Capital screener '''
with open(fName,'rb') as f:
reader = csv.reader(f)
rows = [row for row in reader]
header = rows[0]
data = [[] for i in range(len(header))]
for row in rows[1:]:
for i,elm in enumerate(row):
try:
data[i].append(float(elm))
except Exception:
data[i].append(str(elm))
return DataFrame(dict(zip(header,data)),index=Index(range(len(data[0]))))[header]
def sharpe(pnl):
return np.sqrt(250)*pnl.mean()/pnl.std()
def drawdown(s):
"""
calculate max drawdown and duration
Input:
s, price or cumulative pnl curve $
Returns:
drawdown : vector of drawdwon values
duration : vector of drawdown duration
"""
# convert to array if got pandas series, 10x speedup
if isinstance(s,pd.Series):
idx = s.index
s = s.values
returnSeries = True
else:
returnSeries = False
if s.min() < 0: # offset if signal minimum is less than zero
s = s-s.min()
highwatermark = np.zeros(len(s))
drawdown = np.zeros(len(s))
drawdowndur = np.zeros(len(s))
for t in range(1,len(s)):
highwatermark[t] = max(highwatermark[t-1], s[t])
drawdown[t] = (highwatermark[t]-s[t])
drawdowndur[t]= (0 if drawdown[t] == 0 else drawdowndur[t-1]+1)
if returnSeries:
return pd.Series(index=idx,data=drawdown), pd.Series(index=idx,data=drawdowndur)
else:
return drawdown , drawdowndur
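# Illustrative usage sketch (hypothetical cumulative pnl series):
#   equity = pd.Series([0.0, 1.0, 3.0, 2.0, 2.5, 4.0])
#   dd, dd_dur = drawdown(equity)   # max(dd) == 1.0, dd_dur peaks at 2 bars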
def profitRatio(pnl):
'''
calculate profit ratio as sum(pnl)/drawdown
Input: pnl - daily pnl, Series or DataFrame
'''
def processVector(pnl): # process a single column
s = pnl.fillna(0)
dd = drawdown(s)[0]
p = s.sum()/dd.max()
return p
if isinstance(pnl,Series):
return processVector(pnl)
elif isinstance(pnl,DataFrame):
p = Series(index = pnl.columns)
for col in pnl.columns:
p[col] = processVector(pnl[col])
return p
else:
raise TypeError("Input must be DataFrame or Series, not "+str(type(pnl)))
def candlestick(df,width=0.5, colorup='b', colordown='r'):
''' plot a candlestick chart of a dataframe '''
O = df['open'].values
H = df['high'].values
L = df['low'].values
C = df['close'].values
fig = plt.gcf()
ax = plt.axes()
#ax.hold(True)
X = df.index
#plot high and low
ax.bar(X,height=H-L,bottom=L,width=0.1,color='k')
idxUp = C>O
ax.bar(X[idxUp],height=(C-O)[idxUp],bottom=O[idxUp],width=width,color=colorup)
idxDown = C<=O
ax.bar(X[idxDown],height=(O-C)[idxDown],bottom=C[idxDown],width=width,color=colordown)
try:
fig.autofmt_xdate()
except Exception: # pragma: no cover
pass
ax.grid(True)
#ax.bar(x,height=H-L,bottom=L,width=0.01,color='k')
def datetime2matlab(t):
''' convert datetime timestamp to matlab numeric timestamp '''
mdn = t + dt.timedelta(days = 366)
frac = (t-dt.datetime(t.year,t.month,t.day,0,0,0)).seconds / (24.0 * 60.0 * 60.0)
return mdn.toordinal() + frac
def getDataSources(fName = None):
''' return data sources directories for this machine.
directories are defined in datasources.ini or provided filepath'''
import socket
from ConfigParser import ConfigParser
pcName = socket.gethostname()
p = ConfigParser()
p.optionxform = str
if fName is None:
fName = 'datasources.ini'
p.read(fName)
if pcName not in p.sections():
raise NameError('Host name section %s not found in file %s' %(pcName,fName))
dataSources = {}
for option in p.options(pcName):
dataSources[option] = p.get(pcName,option)
return dataSources
if __name__ == '__main__':
df = DataFrame({'open':[1,2,3],'high':[5,6,7],'low':[-2,-1,0],'close':[2,1,4]})
plt.clf()
candlestick(df) | bsd-3-clause |
nicoboss/Floatmotion | pygame/tests/run_tests__tests/print_stdout/fake_3_test.py | 18 | 1249 | import sys
if __name__ == '__main__':
import os
pkg_dir = (os.path.split(
os.path.split(
os.path.split(
os.path.abspath(__file__))[0])[0])[0])
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests import test_utils
from pygame.tests.test_utils import unittest
else:
from test import test_utils
from test.test_utils import unittest
class KeyModuleTest(unittest.TestCase):
def test_get_focused(self):
self.assert_(True)
def test_get_mods(self):
self.assert_(True)
def test_get_pressed(self):
sys.stdout.write("jibberish ruins everything\n")
self.assert_(False)
def test_name(self):
sys.stdout.write("forgot to remove debug crap\n")
self.assert_(True)
def test_set_mods(self):
self.assert_(True)
def test_set_repeat(self):
self.assert_(True)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
popazerty/e2-gui | lib/python/Components/Renderer/RollerCharLCD.py | 7 | 1894 | from Components.config import config
from Renderer import Renderer
from enigma import eLabel, eTimer
from boxbranding import getMachineProcModel
from Components.VariableText import VariableText
class RollerCharLCD(VariableText, Renderer):
def __init__(self):
Renderer.__init__(self)
VariableText.__init__(self)
if getMachineProcModel().startswith("ini-90"):
self.stringlength = 25
else:
self.stringlength = 16
GUI_WIDGET = eLabel
def connect(self, source):
Renderer.connect(self, source)
self.changed((self.CHANGED_DEFAULT,))
def changed(self, what):
if what[0] == self.CHANGED_CLEAR:
self.text = ''
else:
self.text = self.source.text
if len(self.text) > self.stringlength:
self.text = self.source.text + ' ' * self.stringlength + self.source.text[:self.stringlength + 1]
self.x = len(self.text) - self.stringlength
self.idx = 0
self.backtext = self.text
self.status = 'start'
self.moveTimerText = eTimer()
self.moveTimerText.timeout.get().append(self.moveTimerTextRun)
self.moveTimerText.start(2000)
else:
self.text = self.source.text
self.x = len(self.text)
self.idx = 0
self.backtext = self.text
def moveTimerTextRun(self):
self.moveTimerText.stop()
if self.x > 0:
txttmp = self.backtext[self.idx:]
self.text = txttmp[:self.stringlength]
self.idx += 1
self.x -= 1
if self.x == 0:
self.status = 'end'
self.text = self.backtext
if self.status != 'end':
self.scrollspeed = int(config.lcd.scroll_speed.value)
self.moveTimerText.start(self.scrollspeed)
if config.lcd.scroll_delay.value != 'noscrolling':
self.scrolldelay = int(config.lcd.scroll_delay.value)
self.delayTimer = eTimer()
self.delayTimer.timeout.get().append(self.delayTimergo)
self.delayTimer.start(self.scrolldelay)
def delayTimergo(self):
self.delayTimer.stop()
self.changed((self.CHANGED_DEFAULT,))
| gpl-2.0 |
molebot/brython | www/tests/test_strings.py | 4 | 2887 | # strings
assert 'a'.__class__ == str
assert isinstance('a',str)
hello = "This is a rather long string containing\n\
several lines of text just as you would do in C.\n\
Note that whitespace at the beginning of the line is\
significant."
hello = """\
Usage: thingy [OPTIONS]
-h Display this usage message
-H hostname Hostname to connect to
"""
hello = r"This is a rather long string containing\n\
several lines of text much as you would do in C."
word = 'Help' + 'A'
assert word=='HelpA'
assert '<' + word*5 + '>'=='<HelpAHelpAHelpAHelpAHelpA>'
x = 'str' 'ing'
assert x=='string'
assert 'str'.strip() + 'ing'=='string'
# string methods
x='fooss'
assert x.replace('o','X',20) == 'fXXss'
assert 'GhFF'.lower() == 'ghff'
assert x.lstrip('of') == 'ss'
x='aZjhkhZyuy'
assert x.find('Z')==1
assert x.rfind('Z')==6
assert x.rindex('Z')==6
try:
print(x.rindex('K'))
except ValueError:
pass
assert x.split() == [x]
assert x.split('h') == ['aZj', 'k', 'Zyuy']
#print(x.split('h',1))
assert x.startswith('aZ')
assert x.strip('auy') == 'ZjhkhZ'
assert x.upper()=='AZJHKHZYUY'
x = "zer"
assert x.capitalize() == "Zer"
assert str.capitalize(x) == "Zer"
x = "azert$t y t"
assert x.count('t')==3
assert str.count(x,'t')==3
assert x.endswith("y t")==True
assert x.find('t')==4
assert x.find('$')==5
assert x.find('p')==-1
assert x.index('t')==4
items = ['sd','kj']
assert '-'.join(items)=="sd-kj"
assert "ZER".lower()=="zer"
assert "azerty".lstrip('a')=="zerty"
assert "azerty".lstrip('za')=="erty"
assert "azaerty".lstrip('az')=="erty"
assert "$XE$".replace("$XE$", "!")=="!"
assert "$XE".replace("$XE", "!")=='!'
assert "XE$".replace("XE$", "!")=="!"
assert "XE$".replace("$", "!")=="XE!"
assert "$XE".replace("$", "!")=="!XE"
assert "?XE".replace("?", "!")=="!XE"
assert "XE?".replace("?", "!")=="XE!"
assert "XE!".replace("!", "?")=="XE?"
assert "azterty".find('t')==2
assert "azterty".rfind('t')==5
assert "azterty".rfind('p')==-1
assert "azterty".rindex('t')==5
try:
"azterty".rindex('p')
except ValueError:
pass
assert "azerty".rstrip('y')=="azert"
assert "azerty".rstrip('yt')=="azer"
assert "azeryty".rstrip('ty')=="azer"
assert "az er ty".split()==["az","er","ty"]
assert "azferfty".split('f')==["az","er","ty"]
assert " aBc dEf ".split(maxsplit=1)==['aBc','dEf ']
assert " aBc dEf ".split()==['aBc','dEf']
assert "az\ner\nty".splitlines()==["az","er","ty"]
assert "azerty".startswith('az')
assert " azerty ".strip() == "azerty"
assert "bghggbazertyhbg".strip("bhg") == "azerty"
assert "zer".upper() == "ZER"
assert r'(?:([\w ]+) ([\w.]+) .*\[.* ([\d.]+)\])' == (r'(?:([\w ]+) ([\w.]+) '
'.*'
'\[.* ([\d.]+)\])'), 'raw string continuation'
# issue 265
assert "" in "test"
assert "" in ""
assert not "a" in ""
# issue 285
assert "ab"[1:0:-1] == 'b'
print("passed all tests...")
| bsd-3-clause |
dyoung418/tensorflow | tensorflow/tools/dist_test/python/mnist_replica.py | 44 | 10714 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed MNIST training and validation, with model replicas.
A simple softmax model with one hidden layer is defined. The parameters
(weights and biases) are located on one parameter server (ps), while the ops
are executed on two worker nodes by default. The TF sessions also run on the
worker node.
Multiple invocations of this script can be done in parallel, with different
values for --task_index. There should be exactly one invocation with
--task_index, which will create a master session that carries out variable
initialization. The other, non-master, sessions will wait for the master
session to finish the initialization before proceeding to the training stage.
The coordination between the multiple worker invocations occurs due to
the definition of the parameters on the same ps devices. The parameter updates
from one worker is visible to all other workers. As such, the workers can
perform forward computation and gradient calculation in parallel, which
should lead to increased training speed for the simple model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
import tempfile
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
flags = tf.app.flags
flags.DEFINE_string("data_dir", "/tmp/mnist-data",
"Directory for storing mnist data")
flags.DEFINE_boolean("download_only", False,
"Only perform downloading of data; Do not proceed to "
"session preparation, model definition or training")
flags.DEFINE_integer("task_index", None,
"Worker task index, should be >= 0. task_index=0 is "
"the master worker task the performs the variable "
"initialization ")
flags.DEFINE_integer("num_gpus", 1,
"Total number of gpus for each machine."
"If you don't use GPU, please set it to '0'")
flags.DEFINE_integer("replicas_to_aggregate", None,
"Number of replicas to aggregate before parameter update"
"is applied (For sync_replicas mode only; default: "
"num_workers)")
flags.DEFINE_integer("hidden_units", 100,
"Number of units in the hidden layer of the NN")
flags.DEFINE_integer("train_steps", 200,
"Number of (global) training steps to perform")
flags.DEFINE_integer("batch_size", 100, "Training batch size")
flags.DEFINE_float("learning_rate", 0.01, "Learning rate")
flags.DEFINE_boolean("sync_replicas", False,
"Use the sync_replicas (synchronized replicas) mode, "
"wherein the parameter updates from workers are aggregated "
"before applied to avoid stale gradients")
flags.DEFINE_boolean(
"existing_servers", False, "Whether servers already exists. If True, "
"will use the worker hosts via their GRPC URLs (one client process "
"per worker host). Otherwise, will create an in-process TensorFlow "
"server.")
flags.DEFINE_string("ps_hosts","localhost:2222",
"Comma-separated list of hostname:port pairs")
flags.DEFINE_string("worker_hosts", "localhost:2223,localhost:2224",
"Comma-separated list of hostname:port pairs")
flags.DEFINE_string("job_name", None,"job name: worker or ps")
FLAGS = flags.FLAGS
IMAGE_PIXELS = 28
def main(unused_argv):
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
if FLAGS.download_only:
sys.exit(0)
if FLAGS.job_name is None or FLAGS.job_name == "":
raise ValueError("Must specify an explicit `job_name`")
if FLAGS.task_index is None or FLAGS.task_index =="":
raise ValueError("Must specify an explicit `task_index`")
print("job name = %s" % FLAGS.job_name)
print("task index = %d" % FLAGS.task_index)
#Construct the cluster and start the server
ps_spec = FLAGS.ps_hosts.split(",")
worker_spec = FLAGS.worker_hosts.split(",")
# Get the number of workers.
num_workers = len(worker_spec)
cluster = tf.train.ClusterSpec({
"ps": ps_spec,
"worker": worker_spec})
if not FLAGS.existing_servers:
# Not using existing servers. Create an in-process server.
server = tf.train.Server(
cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index)
if FLAGS.job_name == "ps":
server.join()
is_chief = (FLAGS.task_index == 0)
if FLAGS.num_gpus > 0:
# Avoid gpu allocation conflict: now allocate task_num -> #gpu
# for each worker in the corresponding machine
gpu = (FLAGS.task_index % FLAGS.num_gpus)
worker_device = "/job:worker/task:%d/gpu:%d" % (FLAGS.task_index, gpu)
elif FLAGS.num_gpus == 0:
# Just allocate the CPU to worker server
cpu = 0
worker_device = "/job:worker/task:%d/cpu:%d" % (FLAGS.task_index, cpu)
# The device setter will automatically place Variables ops on separate
# parameter servers (ps). The non-Variable ops will be placed on the workers.
# The ps use CPU and workers use corresponding GPU
with tf.device(
tf.train.replica_device_setter(
worker_device=worker_device,
ps_device="/job:ps/cpu:0",
cluster=cluster)):
global_step = tf.Variable(0, name="global_step", trainable=False)
# Variables of the hidden layer
hid_w = tf.Variable(
tf.truncated_normal(
[IMAGE_PIXELS * IMAGE_PIXELS, FLAGS.hidden_units],
stddev=1.0 / IMAGE_PIXELS),
name="hid_w")
hid_b = tf.Variable(tf.zeros([FLAGS.hidden_units]), name="hid_b")
# Variables of the softmax layer
sm_w = tf.Variable(
tf.truncated_normal(
[FLAGS.hidden_units, 10],
stddev=1.0 / math.sqrt(FLAGS.hidden_units)),
name="sm_w")
sm_b = tf.Variable(tf.zeros([10]), name="sm_b")
# Ops: located on the worker specified with FLAGS.task_index
x = tf.placeholder(tf.float32, [None, IMAGE_PIXELS * IMAGE_PIXELS])
y_ = tf.placeholder(tf.float32, [None, 10])
hid_lin = tf.nn.xw_plus_b(x, hid_w, hid_b)
hid = tf.nn.relu(hid_lin)
y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b))
cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
opt = tf.train.AdamOptimizer(FLAGS.learning_rate)
if FLAGS.sync_replicas:
if FLAGS.replicas_to_aggregate is None:
replicas_to_aggregate = num_workers
else:
replicas_to_aggregate = FLAGS.replicas_to_aggregate
opt = tf.train.SyncReplicasOptimizer(
opt,
replicas_to_aggregate=replicas_to_aggregate,
total_num_replicas=num_workers,
name="mnist_sync_replicas")
train_step = opt.minimize(cross_entropy, global_step=global_step)
if FLAGS.sync_replicas:
local_init_op = opt.local_step_init_op
if is_chief:
local_init_op = opt.chief_init_op
ready_for_local_init_op = opt.ready_for_local_init_op
# Initial token and chief queue runners required by the sync_replicas mode
chief_queue_runner = opt.get_chief_queue_runner()
sync_init_op = opt.get_init_tokens_op()
init_op = tf.global_variables_initializer()
train_dir = tempfile.mkdtemp()
if FLAGS.sync_replicas:
sv = tf.train.Supervisor(
is_chief=is_chief,
logdir=train_dir,
init_op=init_op,
local_init_op=local_init_op,
ready_for_local_init_op=ready_for_local_init_op,
recovery_wait_secs=1,
global_step=global_step)
else:
sv = tf.train.Supervisor(
is_chief=is_chief,
logdir=train_dir,
init_op=init_op,
recovery_wait_secs=1,
global_step=global_step)
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
device_filters=["/job:ps", "/job:worker/task:%d" % FLAGS.task_index])
# The chief worker (task_index==0) session will prepare the session,
# while the remaining workers will wait for the preparation to complete.
if is_chief:
print("Worker %d: Initializing session..." % FLAGS.task_index)
else:
print("Worker %d: Waiting for session to be initialized..." %
FLAGS.task_index)
if FLAGS.existing_servers:
server_grpc_url = "grpc://" + worker_spec[FLAGS.task_index]
print("Using existing server at: %s" % server_grpc_url)
sess = sv.prepare_or_wait_for_session(server_grpc_url,
config=sess_config)
else:
sess = sv.prepare_or_wait_for_session(server.target, config=sess_config)
print("Worker %d: Session initialization complete." % FLAGS.task_index)
if FLAGS.sync_replicas and is_chief:
# Chief worker will start the chief queue runner and call the init op.
sess.run(sync_init_op)
sv.start_queue_runners(sess, [chief_queue_runner])
# Perform training
time_begin = time.time()
print("Training begins @ %f" % time_begin)
local_step = 0
while True:
# Training feed
batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
train_feed = {x: batch_xs, y_: batch_ys}
_, step = sess.run([train_step, global_step], feed_dict=train_feed)
local_step += 1
now = time.time()
print("%f: Worker %d: training step %d done (global step: %d)" %
(now, FLAGS.task_index, local_step, step))
if step >= FLAGS.train_steps:
break
time_end = time.time()
print("Training ends @ %f" % time_end)
training_time = time_end - time_begin
print("Training elapsed time: %f s" % training_time)
# Validation feed
val_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
val_xent = sess.run(cross_entropy, feed_dict=val_feed)
print("After %d training step(s), validation cross entropy = %g" %
(FLAGS.train_steps, val_xent))
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
nephila/django-knocker | docs/conf.py | 1 | 8313 | # -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
parent = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
sys.path.insert(0, parent)
sys.path.insert(0, os.path.join(parent, 'tests'))
import cms_helper # isort:skip
import knocker # isort:skip
cms_helper.setup()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-knocker'
copyright = u'2016, Iacopo Spalletti'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = knocker.__version__
# The full version, including alpha/beta/rc tags.
release = knocker.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-knockerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-knocker.tex', u'django-knocker Documentation',
u'Iacopo Spalletti', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-knocker', u'django-knocker Documentation',
[u'Iacopo Spalletti'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-knocker', u'django-knocker Documentation',
u'Iacopo Spalletti', 'django-knocker', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause |
Romain-Geissler-1A/avro | lang/py/src/avro/protocol.py | 64 | 7963 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Protocol implementation.
"""
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import json
except ImportError:
import simplejson as json
from avro import schema
#
# Constants
#
# TODO(hammer): confirmed 'fixed' with Doug
VALID_TYPE_SCHEMA_TYPES = ('enum', 'record', 'error', 'fixed')
#
# Exceptions
#
class ProtocolParseException(schema.AvroException):
pass
#
# Base Classes
#
class Protocol(object):
"""An application protocol."""
def _parse_types(self, types, type_names):
type_objects = []
for type in types:
type_object = schema.make_avsc_object(type, type_names)
if type_object.type not in VALID_TYPE_SCHEMA_TYPES:
fail_msg = 'Type %s not an enum, fixed, record, or error.' % type
raise ProtocolParseException(fail_msg)
type_objects.append(type_object)
return type_objects
def _parse_messages(self, messages, names):
message_objects = {}
for name, body in messages.iteritems():
if message_objects.has_key(name):
fail_msg = 'Message name "%s" repeated.' % name
raise ProtocolParseException(fail_msg)
elif not(hasattr(body, 'get') and callable(body.get)):
fail_msg = 'Message name "%s" has non-object body %s.' % (name, body)
raise ProtocolParseException(fail_msg)
request = body.get('request')
response = body.get('response')
errors = body.get('errors')
message_objects[name] = Message(name, request, response, errors, names)
return message_objects
def __init__(self, name, namespace=None, types=None, messages=None):
# Ensure valid ctor args
if not name:
fail_msg = 'Protocols must have a non-empty name.'
raise ProtocolParseException(fail_msg)
elif not isinstance(name, basestring):
fail_msg = 'The name property must be a string.'
raise ProtocolParseException(fail_msg)
elif namespace is not None and not isinstance(namespace, basestring):
fail_msg = 'The namespace property must be a string.'
raise ProtocolParseException(fail_msg)
elif types is not None and not isinstance(types, list):
fail_msg = 'The types property must be a list.'
raise ProtocolParseException(fail_msg)
elif (messages is not None and
not(hasattr(messages, 'get') and callable(messages.get))):
fail_msg = 'The messages property must be a JSON object.'
raise ProtocolParseException(fail_msg)
self._props = {}
self.set_prop('name', name)
type_names = schema.Names()
if namespace is not None:
self.set_prop('namespace', namespace)
type_names.default_namespace = namespace
if types is not None:
self.set_prop('types', self._parse_types(types, type_names))
if messages is not None:
self.set_prop('messages', self._parse_messages(messages, type_names))
self._md5 = md5(str(self)).digest()
# read-only properties
name = property(lambda self: self.get_prop('name'))
namespace = property(lambda self: self.get_prop('namespace'))
fullname = property(lambda self:
schema.Name(self.name, self.namespace).fullname)
types = property(lambda self: self.get_prop('types'))
types_dict = property(lambda self: dict([(type.name, type)
for type in self.types]))
messages = property(lambda self: self.get_prop('messages'))
md5 = property(lambda self: self._md5)
props = property(lambda self: self._props)
# utility functions to manipulate properties dict
def get_prop(self, key):
return self.props.get(key)
def set_prop(self, key, value):
self.props[key] = value
def to_json(self):
to_dump = {}
to_dump['protocol'] = self.name
names = schema.Names(default_namespace=self.namespace)
if self.namespace:
to_dump['namespace'] = self.namespace
if self.types:
to_dump['types'] = [ t.to_json(names) for t in self.types ]
if self.messages:
messages_dict = {}
for name, body in self.messages.iteritems():
messages_dict[name] = body.to_json(names)
to_dump['messages'] = messages_dict
return to_dump
def __str__(self):
return json.dumps(self.to_json())
def __eq__(self, that):
to_cmp = json.loads(str(self))
return to_cmp == json.loads(str(that))
class Message(object):
"""A Protocol message."""
def _parse_request(self, request, names):
if not isinstance(request, list):
fail_msg = 'Request property not a list: %s' % request
raise ProtocolParseException(fail_msg)
return schema.RecordSchema(None, None, request, names, 'request')
def _parse_response(self, response, names):
if isinstance(response, basestring) and names.has_name(response, None):
return names.get_name(response, None)
else:
return schema.make_avsc_object(response, names)
def _parse_errors(self, errors, names):
if not isinstance(errors, list):
fail_msg = 'Errors property not a list: %s' % errors
raise ProtocolParseException(fail_msg)
errors_for_parsing = {'type': 'error_union', 'declared_errors': errors}
return schema.make_avsc_object(errors_for_parsing, names)
def __init__(self, name, request, response, errors=None, names=None):
self._name = name
self._props = {}
self.set_prop('request', self._parse_request(request, names))
self.set_prop('response', self._parse_response(response, names))
if errors is not None:
self.set_prop('errors', self._parse_errors(errors, names))
# read-only properties
name = property(lambda self: self._name)
request = property(lambda self: self.get_prop('request'))
response = property(lambda self: self.get_prop('response'))
errors = property(lambda self: self.get_prop('errors'))
props = property(lambda self: self._props)
# utility functions to manipulate properties dict
def get_prop(self, key):
return self.props.get(key)
def set_prop(self, key, value):
self.props[key] = value
def __str__(self):
return json.dumps(self.to_json())
def to_json(self, names=None):
if names is None:
names = schema.Names()
to_dump = {}
to_dump['request'] = self.request.to_json(names)
to_dump['response'] = self.response.to_json(names)
if self.errors:
to_dump['errors'] = self.errors.to_json(names)
return to_dump
def __eq__(self, that):
return self.name == that.name and self.props == that.props
def make_avpr_object(json_data):
"""Build Avro Protocol from data parsed out of JSON string."""
if hasattr(json_data, 'get') and callable(json_data.get):
name = json_data.get('protocol')
namespace = json_data.get('namespace')
types = json_data.get('types')
messages = json_data.get('messages')
return Protocol(name, namespace, types, messages)
else:
raise ProtocolParseException('Not a JSON object: %s' % json_data)
def parse(json_string):
"""Constructs the Protocol from the JSON text."""
try:
json_data = json.loads(json_string)
except:
raise ProtocolParseException('Error parsing JSON: %s' % json_string)
# construct the Avro Protocol object
return make_avpr_object(json_data)
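# --- Usage sketch (illustrative, not part of the Avro sources) ---
# A made-up "Echo" protocol run through parse() above, showing the parsed
# protocol name and its message names.
if __name__ == '__main__':
  _EXAMPLE_PROTOCOL = """
  {"protocol": "Echo",
   "namespace": "example.proto",
   "messages": {
     "echo": {"request": [{"name": "text", "type": "string"}],
              "response": "string"}
   }}
  """
  _echo = parse(_EXAMPLE_PROTOCOL)
  print(_echo.name)              # Echo
  print(sorted(_echo.messages))  # ['echo']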
| apache-2.0 |
gtrensch/nest-simulator | pynest/nest/tests/test_facetshw_stdp.py | 20 | 5894 | # -*- coding: utf-8 -*-
#
# test_facetshw_stdp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import numpy as np
import unittest
class FacetsTestCase(unittest.TestCase):
"""
This script is testing the accumulation of spike pairs and
the weight update mechanism as implemented in the FACETS hardware.
Author: Thomas Pfeil
Date of first version: 21.01.2013
"""
def test_facetshw_stdp(self):
nest.ResetKernel()
modelName = 'stdp_facetshw_synapse_hom'
# homogeneous parameters for all synapses
Wmax = 100.0
# see *.cpp file of synapse model and Pfeil et al. 2012 for LUT
# configuration
lut_0 = [2, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 14, 15]
lut_1 = [0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13]
lut_2 = range(16) # identity
config_0 = [0, 0, 1, 0]
config_1 = [0, 1, 0, 0]
reset_pattern = 6 * [1] # reset all
# individual parameters for each synapse
        # threshold is reached after every 36 spike pairs: 36 * e^(-10/20) = 21.83510375
lut_th_causal = 21.835
lut_th_acausal = lut_th_causal
# other parameters
startWeight = 0 # as digital value [0, 1, ..., 15]
tau = 20.0
timeBetweenPairs = 100.0
# frequency_of_pairs = 10Hz => delta_t(+) = 10ms, delta_t(-) = 90ms
delay = 5.0
spikesIn = np.arange(10.0, 60000.0, timeBetweenPairs)
synapseDict = {'tau_plus': tau,
'tau_minus_stdp': tau,
'Wmax': Wmax,
'synapses_per_driver': 50,
'driver_readout_time': 15.0,
'lookuptable_0': lut_0,
'lookuptable_1': lut_1,
'lookuptable_2': lut_2,
'configbit_0': config_0,
'configbit_1': config_1,
'reset_pattern': reset_pattern,
'a_thresh_th': lut_th_causal,
'a_thresh_tl': lut_th_acausal}
# build network
stim = nest.Create('spike_generator')
neuronA = nest.Create('parrot_neuron')
neuronB = nest.Create('parrot_neuron')
nest.SetStatus(stim, [{'spike_times': spikesIn}])
nest.SetDefaults(modelName, synapseDict)
# check if GetDefaults returns same values as have been set
synapseDictGet = nest.GetDefaults(modelName)
for key in synapseDict.keys():
self.assertTrue(
all(np.atleast_1d(synapseDictGet[key] == synapseDict[key])))
nest.Connect(stim, neuronA)
nest.Connect(neuronA, neuronB, syn_spec={
'weight': float(startWeight) / 15.0 * Wmax,
'delay': delay, 'synapse_model': modelName})
nest.Simulate(50.0)
weightTrace = []
for run in range(len(spikesIn)):
nest.Simulate(timeBetweenPairs)
connections = nest.GetConnections(neuronA)
if (connections.get('synapse_model') == modelName):
weightTrace.append(
[run, connections.get('weight'),
connections.get('a_causal'),
connections.get('a_acausal')])
# analysis
weightTrace = np.array(weightTrace)
# just before theoretical updates
weightTraceMod36pre = weightTrace[35::36]
# just after theoretical updates
weightTraceMod36 = weightTrace[::36]
weightIndex = int(startWeight)
for i in range(len(weightTraceMod36pre)):
# check weight value before update
# (after spike pair with index 35, 71, ...)
self.assertTrue(np.allclose(weightTraceMod36pre[i][1],
1.0 / 15.0 * weightIndex * Wmax,
atol=1e-6))
weightIndex = lut_0[weightIndex]
weightIndex = int(startWeight)
for i in range(len(weightTraceMod36)):
# check weight value after update
# (after spike pair with index 0, 36, 72, ...)
self.assertTrue(np.allclose(weightTraceMod36[i][1],
1.0 / 15.0 * weightIndex * Wmax,
atol=1e-6))
# check charge on causal capacitor
self.assertTrue(np.allclose(weightTraceMod36[i][2],
np.ones_like(weightTraceMod36[i][2]) *
np.exp(-2 * delay / tau), atol=1e-6))
weightIndex = lut_0[weightIndex]
# check charge on anti-causal capacitor after each pair
for i in range(len(weightTrace) - 1):
# TODO: global params
self.assertTrue(np.allclose(weightTrace[i, 3], ((i % 36) + 1) *
np.exp(-(timeBetweenPairs -
2 * delay) / tau),
atol=1e-6))
def suite():
suite = unittest.makeSuite(FacetsTestCase, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
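# --- Illustrative sketch (not part of the NEST test suite) ---
# Where the lookup-table threshold used above comes from: each causal spike
# pair deposits exp(-2*delay/tau) of charge, so with delay = 5.0 ms and
# tau = 20.0 ms the threshold 21.83510375 corresponds to exactly 36 pairs.
def _expected_threshold(n_pairs=36, delay=5.0, tau=20.0):
    """Charge accumulated on the causal capacitor after n_pairs spike pairs."""
    return n_pairs * np.exp(-2.0 * delay / tau)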
| gpl-2.0 |
bixbydev/Bixby | google/dist/gdata-2.0.18/tests/gdata_tests/blogger/live_client_test.py | 39 | 5831 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = '[email protected] (Jeff Scudder)'
import unittest
import gdata.blogger.client
import gdata.blogger.data
import gdata.gauth
import gdata.client
import atom.http_core
import atom.mock_http_core
import atom.core
import gdata.data
import gdata.test_config as conf
conf.options.register_option(conf.BLOG_ID_OPTION)
class BloggerClientTest(unittest.TestCase):
def setUp(self):
self.client = None
if conf.options.get_value('runlive') == 'true':
self.client = gdata.blogger.client.BloggerClient()
conf.configure_client(self.client, 'BloggerTest', 'blogger')
def tearDown(self):
conf.close_client(self.client)
def test_create_update_delete(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'test_create_update_delete')
# Add a blog post.
created = self.client.add_post(conf.options.get_value('blogid'),
'test post from BloggerClientTest',
'Hey look, another test!',
labels=['test', 'python'])
self.assertEqual(created.title.text, 'test post from BloggerClientTest')
self.assertEqual(created.content.text, 'Hey look, another test!')
self.assertEqual(len(created.category), 2)
self.assert_(created.control is None)
# Change the title of the blog post we just added.
created.title.text = 'Edited'
updated = self.client.update(created)
self.assertEqual(updated.title.text, 'Edited')
self.assert_(isinstance(updated, gdata.blogger.data.BlogPost))
self.assertEqual(updated.content.text, created.content.text)
# Delete the test entry from the blog.
self.client.delete(updated)
def test_create_draft_post(self):
if not conf.options.get_value('runlive') == 'true':
return
conf.configure_cache(self.client, 'test_create_draft_post')
# Add a draft blog post.
created = self.client.add_post(conf.options.get_value('blogid'),
'draft test post from BloggerClientTest',
'This should only be a draft.',
labels=['test2', 'python'], draft=True)
self.assertEqual(created.title.text,
'draft test post from BloggerClientTest')
self.assertEqual(created.content.text, 'This should only be a draft.')
self.assertEqual(len(created.category), 2)
self.assert_(created.control is not None)
self.assert_(created.control.draft is not None)
self.assertEqual(created.control.draft.text, 'yes')
# Publish the blog post.
created.control.draft.text = 'no'
updated = self.client.update(created)
if updated.control is not None and updated.control.draft is not None:
self.assertNotEqual(updated.control.draft.text, 'yes')
# Delete the test entry from the blog using the URL instead of the entry.
self.client.delete(updated.find_edit_link())
def test_create_draft_page(self):
if not conf.options.get_value('runlive') == 'true':
return
conf.configure_cache(self.client, 'test_create_draft_page')
# List all pages on the blog.
pages_before = self.client.get_pages(conf.options.get_value('blogid'))
# Add a draft page to blog.
created = self.client.add_page(conf.options.get_value('blogid'),
'draft page from BloggerClientTest',
'draft content',
draft=True)
self.assertEqual(created.title.text, 'draft page from BloggerClientTest')
self.assertEqual(created.content.text, 'draft content')
self.assert_(created.control is not None)
self.assert_(created.control.draft is not None)
self.assertEqual(created.control.draft.text, 'yes')
self.assertEqual(str(int(created.get_page_id())), created.get_page_id())
# List all pages after adding one.
pages_after = self.client.get_pages(conf.options.get_value('blogid'))
self.assertEqual(len(pages_before.entry) + 1, len(pages_after.entry))
# Publish page.
created.control.draft.text = 'no'
updated = self.client.update(created)
if updated.control is not None and updated.control.draft is not None:
self.assertNotEqual(updated.control.draft.text, 'yes')
# Delete test page.
self.client.delete(updated.find_edit_link())
pages_after = self.client.get_pages(conf.options.get_value('blogid'))
self.assertEqual(len(pages_before.entry), len(pages_after.entry))
def test_retrieve_post_with_categories(self):
if not conf.options.get_value('runlive') == 'true':
return
conf.configure_cache(self.client, 'test_retrieve_post_with_categories')
query = gdata.blogger.client.Query(categories=["news"], strict=True)
posts = self.client.get_posts(conf.options.get_value('blogid'), query=query)
def suite():
return conf.build_suite([BloggerClientTest])
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
| gpl-3.0 |
sdoran35/hate-to-hugs | venv/lib/python3.6/site-packages/pip/_vendor/appdirs.py | 327 | 22368 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - macOS: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version_info__ = (1, 4, 0)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
if sys.platform.startswith('java'):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
system = 'win32'
elif os_name.startswith('Mac'): # "macOS", etc.
system = 'darwin'
else: # "Linux", "SunOS", "FreeBSD", etc.
# Setting this to "linux2" is not ideal, but only Windows or Mac
# are actually checked for and the rest of the module expects
# *sys.platform* style strings.
system = 'linux2'
else:
system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
macOS: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
        Win XP (not roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
        Win XP (roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if system == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('~/Library/Application Support/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical user data directories are:
macOS: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv('XDG_DATA_DIRS',
os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
macOS: same as user_data_dir
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
    That means, by default "~/.config/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical user data directories are:
macOS: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system in ["win32", "darwin"]:
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
else:
# XDG default for $XDG_CONFIG_DIRS
# only first, if multipath is False
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
macOS: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
if opinion:
path = os.path.join(path, "Cache")
elif system == 'darwin':
path = os.path.expanduser('~/Library/Caches')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user cache directories are:
macOS: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if system == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Logs'),
appname)
elif system == "win32":
path = user_data_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname, appauthor=None, version=None, roaming=False,
multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_config_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
        if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
if system == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir", "site_data_dir",
"user_config_dir", "site_config_dir",
"user_cache_dir", "user_log_dir")
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
| mit |
bingopodcast/bingos | bingo_emulator/graphics/super_7.py | 1 | 58578 |
import pygame
import random
pygame.display.set_caption("Multi Bingo")
screen = pygame.display.set_mode((0,0))
screen.fill([0,0,0])
pygame.mouse.set_visible(False)
meter = pygame.image.load('graphics/assets/black_register_cover.png').convert()
odds = pygame.image.load('super_7/assets/odds.png').convert_alpha()
eb = pygame.image.load('super_7/assets/eb.png').convert_alpha()
eb_number = pygame.image.load('super_7/assets/eb_number.png').convert_alpha()
extra_balls = pygame.image.load('super_7/assets/extra_balls.png').convert_alpha()
time = pygame.image.load('super_7/assets/time.png').convert_alpha()
ml_letter = pygame.image.load('super_7/assets/ml_letter.png').convert_alpha()
ml_arrow = pygame.image.load('super_7/assets/ml_arrow.png').convert_alpha()
ml_a = pygame.image.load('super_7/assets/ml_a.png').convert_alpha()
ml_b = pygame.image.load('super_7/assets/ml_b.png').convert_alpha()
ml_c = pygame.image.load('super_7/assets/ml_c.png').convert_alpha()
select_now = pygame.image.load('super_7/assets/select_now.png').convert_alpha()
tilt = pygame.image.load('super_7/assets/tilt.png').convert_alpha()
button = pygame.image.load('super_7/assets/pap.png').convert_alpha()
red_double = pygame.image.load('super_7/assets/red_double.png').convert_alpha()
green_double = pygame.image.load('super_7/assets/green_double.png').convert_alpha()
yellow_double = pygame.image.load('super_7/assets/yellow_double.png').convert_alpha()
blue_double = pygame.image.load('super_7/assets/blue_double.png').convert_alpha()
four_stars = pygame.image.load('super_7/assets/four_stars.png').convert_alpha()
six_stars = pygame.image.load('super_7/assets/six_stars.png').convert_alpha()
three_stars = pygame.image.load('super_7/assets/three_stars.png').convert_alpha()
three_red = pygame.image.load('super_7/assets/three_red.png').convert_alpha()
two_red = pygame.image.load('super_7/assets/two_red.png').convert_alpha()
red_letter = pygame.image.load('super_7/assets/red_letter.png').convert_alpha()
letter1 = pygame.image.load('super_7/assets/letter1.png').convert_alpha()
letter2 = pygame.image.load('super_7/assets/letter2.png').convert_alpha()
letter3 = pygame.image.load('super_7/assets/letter3.png').convert_alpha()
letter4 = pygame.image.load('super_7/assets/letter4.png').convert_alpha()
letter5 = pygame.image.load('super_7/assets/letter5.png').convert_alpha()
letter6 = pygame.image.load('super_7/assets/letter6.png').convert_alpha()
red_letter1 = pygame.image.load('super_7/assets/red_letter1.png').convert_alpha()
red_letter2 = pygame.image.load('super_7/assets/red_letter2.png').convert_alpha()
red_letter3 = pygame.image.load('super_7/assets/red_letter3.png').convert_alpha()
red_letter4 = pygame.image.load('super_7/assets/red_letter4.png').convert_alpha()
red_letter5 = pygame.image.load('super_7/assets/red_letter5.png').convert_alpha()
red_letter6 = pygame.image.load('super_7/assets/red_letter6.png').convert_alpha()
number_card = pygame.image.load('super_7/assets/number_card.png').convert_alpha()
number = pygame.image.load('super_7/assets/number.png').convert_alpha()
columnb1 = pygame.image.load('super_7/assets/columnb1.png').convert_alpha()
columnb2 = pygame.image.load('super_7/assets/columnb2.png').convert_alpha()
columna = pygame.image.load('super_7/assets/columna.png').convert_alpha()
columnc1 = pygame.image.load('super_7/assets/columnc1.png').convert_alpha()
columnc2 = pygame.image.load('super_7/assets/columnc2.png').convert_alpha()
double_triple = pygame.image.load('super_7/assets/double_triple.png').convert_alpha()
collected = pygame.image.load('super_7/assets/collected.png').convert_alpha()
special_odds = pygame.image.load('super_7/assets/special_odds.png').convert_alpha()
twin_number = pygame.image.load('super_7/assets/twin_number.png').convert_alpha()
seven_odds = pygame.image.load('super_7/assets/seven_odds.png').convert_alpha()
diamond = pygame.image.load('super_7/assets/diamond.png').convert_alpha()
diamond_7 = pygame.image.load('super_7/assets/diamond_7.png').convert_alpha()
ball = pygame.image.load('super_7/assets/ball.png').convert_alpha()
bg_menu = pygame.image.load('super_7/assets/super_7_menu.png').convert_alpha()
bg_gi = pygame.image.load('super_7/assets/super_7_gi.png').convert_alpha()
bg_off = pygame.image.load('super_7/assets/super_7_off.png').convert_alpha()
class scorereel():
""" Score Reels are used to count replays """
def __init__(self, pos, image):
self.position = pos
self.default_y = self.position[1]
self.image = pygame.image.load(image).convert()
reel1 = scorereel([110,797], "graphics/assets/white_reel.png")
reel10 = scorereel([91,797], "graphics/assets/white_reel.png")
reel100 = scorereel([72,797], "graphics/assets/white_reel.png")
reel1000 = scorereel([53,797], "graphics/assets/white_reel.png")
def display(s, replays=0, menu=False):
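    """Redraw the Super 7 backglass from the current game state.

    `s` is the game/mode object whose odds, feature and reel positions are
    read below, `replays` is the machine's replay count, and `menu=True`
    draws the service-menu artwork instead of the normal backglass.
    """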
meter.set_colorkey((255,0,252))
meter_position = [44,797]
screen.blit(reel1.image, reel1.position)
screen.blit(reel10.image, reel10.position)
screen.blit(reel100.image, reel100.position)
screen.blit(reel1000.image, reel1000.position)
screen.blit(meter, meter_position)
if s.game.line2.position == 0:
p = [233,368]
screen.blit(columnb1, p)
p = [284,369]
screen.blit(columnb2, p)
else:
p = [233,368]
screen.blit(columnb2, p)
p = [284,369]
screen.blit(columnb1, p)
if s.game.line1.position == 0 or s.game.line1.position == 2:
p = [337,318]
screen.blit(columna, p)
elif s.game.line1.position == 1:
p = [337,368]
screen.blit(columna, p)
else:
p = [337,269]
screen.blit(columna, p)
if s.game.line3.position == 0:
p = [389,368]
screen.blit(columnc1, p)
p = [440,369]
screen.blit(columnc2, p)
else:
p = [389,368]
screen.blit(columnc2, p)
p = [440,369]
screen.blit(columnc1, p)
nc_p = [230,368]
screen.blit(number_card, nc_p)
backglass_position = [0, 0]
backglass = pygame.Surface(screen.get_size(), flags=pygame.SRCALPHA)
backglass.fill((0, 0, 0))
if menu == True:
screen.blit(bg_menu, backglass_position)
else:
if (s.game.anti_cheat.status == True):
screen.blit(bg_gi, backglass_position)
else:
screen.blit(bg_off, backglass_position)
if s.game.eb_play.status == True:
eb_position = [41,1040]
screen.blit(extra_balls, eb_position)
if s.game.extra_ball.position >= 1:
eb_position = [150,1040]
screen.blit(eb_number, eb_position)
if s.game.extra_ball.position >= 2:
eb_position = [201,1040]
screen.blit(eb, eb_position)
if s.game.extra_ball.position >= 3:
eb_position = [262,1040]
screen.blit(eb, eb_position)
if s.game.extra_ball.position >= 4:
eb_position = [323,1040]
screen.blit(eb_number, eb_position)
if s.game.extra_ball.position >= 5:
eb_position = [374,1040]
screen.blit(eb, eb_position)
if s.game.extra_ball.position >= 6:
eb_position = [436,1040]
screen.blit(eb, eb_position)
if s.game.extra_ball.position >= 7:
eb_position = [498,1040]
screen.blit(eb_number, eb_position)
if s.game.extra_ball.position >= 8:
eb_position = [548,1040]
screen.blit(eb, eb_position)
if s.game.extra_ball.position >= 9:
eb_position = [610,1040]
screen.blit(eb, eb_position)
if s.game.red_star.status == True:
rs_position = [18,460]
screen.blit(time, rs_position)
if s.game.yellow_star.status == True:
rs_position = [18,504]
screen.blit(time, rs_position)
if s.game.mystic_lines.position >= 4 or s.game.two_red_letter.status == True or s.game.three_red_letter.status == True:
if s.game.selection_feature.position < 7:
bfp = [18,548]
screen.blit(time, bfp)
elif s.game.selection_feature.position in [7,8]:
bfp = [19,416]
screen.blit(time, bfp)
elif s.game.selection_feature.position == 9:
bfp = [18,372]
screen.blit(time, bfp)
if s.game.ball_count.position < 1:
if s.game.odds_only.status == True:
b = [18,874]
screen.blit(button, b)
elif s.game.features.status == True:
b = [18,912]
screen.blit(button, b)
elif s.game.special.status == True:
b = [18,989]
screen.blit(button, b)
else:
b = [18,950]
screen.blit(button, b)
if s.game.mystic_lines.position == 1:
p = [203,680]
screen.blit(ml_arrow, p)
if s.game.mystic_lines.position == 2:
p = [236,680]
screen.blit(ml_arrow, p)
if s.game.mystic_lines.position == 3:
p = [267,680]
screen.blit(ml_arrow, p)
if s.game.mystic_lines.position >= 4:
p = [300,683]
screen.blit(ml_a, p)
p = [335,591]
screen.blit(ml_letter, p)
if s.game.mystic_lines.position == 5:
p = [334,680]
screen.blit(ml_arrow, p)
if s.game.mystic_lines.position == 6:
p = [360,681]
screen.blit(ml_arrow, p)
if s.game.mystic_lines.position >= 7:
p = [396,682]
screen.blit(ml_b, p)
p = [262,591]
screen.blit(ml_letter, p)
if s.game.mystic_lines.position == 8:
p = [430,680]
screen.blit(ml_arrow, p)
if s.game.mystic_lines.position == 9:
p = [459,680]
screen.blit(ml_arrow, p)
if s.game.mystic_lines.position == 10:
p = [492,682]
screen.blit(ml_c, p)
p = [410,591]
screen.blit(ml_letter, p)
if s.game.mystic_lines.position >= 4:
t = 3
if s.game.selection_feature.position in [7,8]:
t = 4
if s.game.selection_feature.position == 9:
t = 5
if s.game.ball_count.position == t:
s.cancel_delayed(name="blink")
blink([s,1,1])
else:
s.cancel_delayed(name="blink")
if s.game.tilt.status == False:
if s.holes:
if 1 in s.holes:
if s.game.line2.position == 0:
p = [284,526]
screen.blit(number, p)
else:
p = [234,529]
screen.blit(number, p)
if 2 in s.holes:
if s.game.line2.position == 0:
p = [282,377]
screen.blit(number, p)
else:
p = [232,378]
screen.blit(number, p)
if 3 in s.holes:
if s.game.line2.position == 0:
p = [232,427]
screen.blit(number, p)
else:
p = [282,426]
screen.blit(number, p)
if 4 in s.holes:
if s.game.line3.position == 0:
p = [387,378]
screen.blit(number, p)
else:
p = [440,378]
screen.blit(number, p)
if 5 in s.holes:
if s.game.line1.position == 0 or s.game.line1.position == 2:
p = [336,477]
screen.blit(number, p)
elif s.game.line1.position == 1:
p = [336,526]
screen.blit(number, p)
else:
p = [336,428]
screen.blit(number, p)
if 6 in s.holes:
if s.game.line3.position == 0:
p = [440,378]
screen.blit(number, p)
else:
p = [387,378]
screen.blit(number, p)
if 7 in s.holes:
if s.game.line1.position == 0 or s.game.line1.position == 2:
p = [336,526]
screen.blit(number, p)
elif s.game.line1.position == 1:
p = [334,377]
screen.blit(number, p)
else:
p = [336,476]
screen.blit(number, p)
if 8 in s.holes:
if s.game.line2.position == 0:
p = [232,378]
screen.blit(number, p)
else:
p = [282,378]
screen.blit(number, p)
if 9 in s.holes:
if s.game.line1.position == 0 or s.game.line1.position == 2:
p = [336,427]
screen.blit(number, p)
elif s.game.line1.position == 1:
p = [336,476]
screen.blit(number, p)
else:
p = [336,377]
screen.blit(number, p)
if 10 in s.holes:
if s.game.line3.position == 0:
p = [442,477]
screen.blit(number, p)
else:
p = [388,476]
screen.blit(number, p)
if 11 in s.holes:
if s.game.line3.position == 0:
p = [388,428]
screen.blit(number, p)
else:
p = [442,428]
screen.blit(number, p)
if 12 in s.holes:
if s.game.line3.position == 0:
p = [387,476]
screen.blit(number, p)
else:
p = [442,478]
screen.blit(number, p)
if 13 in s.holes:
if s.game.line3.position == 0:
p = [442,526]
screen.blit(number, p)
else:
p = [387,526]
screen.blit(number, p)
if 14 in s.holes:
if s.game.line3.position == 0:
p = [442,428]
screen.blit(number, p)
else:
p = [388,428]
screen.blit(number, p)
if 15 in s.holes:
if s.game.line2.position == 0:
p = [282,426]
screen.blit(number, p)
else:
p = [232,426]
screen.blit(number, p)
if 16 in s.holes:
if s.game.line1.position == 0 or s.game.line1.position == 2:
p = [336,378]
screen.blit(number, p)
elif s.game.line1.position == 1:
p = [336,428]
screen.blit(number, p)
else:
p = [336,526]
screen.blit(number, p)
if 17 in s.holes:
if s.game.line2.position == 0:
p = [285,479]
screen.blit(number, p)
else:
p = [233,479]
screen.blit(number, p)
if 18 in s.holes:
if s.game.line2.position == 0:
p = [233,479]
screen.blit(number, p)
else:
p = [285,479]
screen.blit(number, p)
if 19 in s.holes:
if s.game.line3.position == 0:
p = [387,526]
screen.blit(number, p)
else:
p = [442,526]
screen.blit(number, p)
if 20 in s.holes:
if s.game.line2.position == 0:
p = [232,528]
screen.blit(number, p)
else:
p = [284,526]
screen.blit(number, p)
if s.game.red_odds.position == 1:
o = [192,783]
screen.blit(odds, o)
elif s.game.red_odds.position == 2:
o = [230,783]
screen.blit(odds, o)
elif s.game.red_odds.position == 3:
o = [267,783]
screen.blit(odds, o)
elif s.game.red_odds.position == 4:
o = [305,783]
screen.blit(odds, o)
elif s.game.red_odds.position == 5:
o = [343,783]
screen.blit(odds, o)
elif s.game.red_odds.position == 6:
o = [385,783]
screen.blit(odds, o)
elif s.game.red_odds.position == 7:
o = [436,783]
screen.blit(odds, o)
elif s.game.red_odds.position == 8:
o = [483,783]
screen.blit(odds, o)
elif s.game.red_odds.position == 9:
o = [530,783]
screen.blit(odds, o)
elif s.game.red_odds.position == 10:
o = [578,783]
screen.blit(odds, o)
if s.game.green_odds.position == 1:
o = [192,843]
screen.blit(odds, o)
elif s.game.green_odds.position == 2:
o = [230,843]
screen.blit(odds, o)
elif s.game.green_odds.position == 3:
o = [267,843]
screen.blit(odds, o)
elif s.game.green_odds.position == 4:
o = [305,843]
screen.blit(odds, o)
elif s.game.green_odds.position == 5:
o = [343,843]
screen.blit(odds, o)
elif s.game.green_odds.position == 6:
o = [385,843]
screen.blit(odds, o)
elif s.game.green_odds.position == 7:
o = [436,843]
screen.blit(odds, o)
elif s.game.green_odds.position == 8:
o = [483,843]
screen.blit(odds, o)
elif s.game.green_odds.position == 9:
o = [530,843]
screen.blit(odds, o)
elif s.game.green_odds.position == 10:
o = [578,843]
screen.blit(odds, o)
if s.game.yellow_odds.position == 1:
o = [192,907]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 2:
o = [230,907]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 3:
o = [267,907]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 4:
o = [305,907]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 5:
o = [343,907]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 6:
o = [385,907]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 7:
o = [436,907]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 8:
o = [483,907]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 9:
o = [530,907]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 10:
o = [578,907]
screen.blit(odds, o)
if s.game.blue_odds.position == 1:
o = [192,973]
screen.blit(odds, o)
elif s.game.blue_odds.position == 2:
o = [230,973]
screen.blit(odds, o)
elif s.game.blue_odds.position == 3:
o = [267,973]
screen.blit(odds, o)
elif s.game.blue_odds.position == 4:
o = [305,973]
screen.blit(odds, o)
elif s.game.blue_odds.position == 5:
o = [343,973]
screen.blit(odds, o)
elif s.game.blue_odds.position == 6:
o = [385,973]
screen.blit(odds, o)
elif s.game.blue_odds.position == 7:
o = [436,973]
screen.blit(odds, o)
elif s.game.blue_odds.position == 8:
o = [483,973]
screen.blit(odds, o)
elif s.game.blue_odds.position == 9:
o = [530,973]
screen.blit(odds, o)
elif s.game.blue_odds.position == 10:
o = [578,973]
screen.blit(odds, o)
p = [307,217]
screen.blit(letter1, p)
p = [346,217]
screen.blit(letter2, p)
p = [402,217]
screen.blit(letter3, p)
p = [451,217]
screen.blit(letter4, p)
p = [497,217]
screen.blit(letter5, p)
p = [572,217]
screen.blit(letter6, p)
if s.game.red_odds.position < 5:
p = [307,217]
screen.blit(red_letter1, p)
if s.game.red_odds.position in [5,6]:
p = [346,217]
screen.blit(red_letter2, p)
if s.game.red_odds.position == 7:
p = [402,217]
screen.blit(red_letter3, p)
if s.game.red_odds.position == 8:
p = [451,217]
screen.blit(red_letter4, p)
if s.game.red_odds.position == 9:
p = [497,217]
screen.blit(red_letter5, p)
if s.game.red_odds.position == 10:
p = [572,217]
screen.blit(red_letter6, p)
if s.game.two_red_letter.status == True:
p = [18,258]
screen.blit(red_letter, p)
p = [92,220]
screen.blit(two_red, p)
if s.game.three_red_letter.status == True:
p = [18,258]
screen.blit(red_letter, p)
p = [18,219]
screen.blit(three_red, p)
if s.game.three_stars.status == True:
p = [18,297]
screen.blit(four_stars, p)
p = [18,334]
screen.blit(three_stars, p)
if s.game.six_stars.status == True:
p = [18,297]
screen.blit(four_stars, p)
p = [92,334]
screen.blit(six_stars, p)
if s.game.double_red.status == True:
p = [20,610]
screen.blit(red_double, p)
if s.game.double_yellow.status == True:
p = [94,610]
screen.blit(yellow_double, p)
if s.game.double_green.status == True:
p = [20,683]
screen.blit(green_double, p)
if s.game.double_blue.status == True:
p = [94,683]
screen.blit(blue_double, p)
if s.game.triple.status == False and (s.game.double_red.status == True or s.game.double_yellow.status == True or s.game.double_green.status == True or s.game.double_blue.status == True):
p = [52,680]
screen.blit(double_triple, p)
if s.game.triple.status == True and (s.game.double_red.status == True or s.game.double_yellow.status == True or s.game.double_green.status == True or s.game.double_blue.status == True):
p = [52,647]
screen.blit(double_triple, p)
if s.game.tilt.status == True:
tilt_position = [652,817]
screen.blit(tilt, tilt_position)
# Special Game
if s.game.special_odds.position > 0:
if s.game.special_odds.position == 1:
p = [600,512]
screen.blit(special_odds, p)
p = [547,511]
screen.blit(seven_odds, p)
if s.game.special_odds.position == 2:
p = [599,482]
screen.blit(special_odds, p)
p = [547,482]
screen.blit(seven_odds, p)
if s.game.special_odds.position == 3:
p = [599,453]
screen.blit(special_odds, p)
p = [547,452]
screen.blit(seven_odds, p)
if s.game.special_odds.position == 4:
p = [599,424]
screen.blit(special_odds, p)
p = [547,424]
screen.blit(seven_odds, p)
if s.game.special_odds.position == 5:
p = [599,395]
screen.blit(special_odds, p)
p = [547,394]
screen.blit(seven_odds, p)
if s.game.special_odds.position == 6:
p = [598,366]
screen.blit(special_odds, p)
p = [547,366]
screen.blit(seven_odds, p)
if s.game.special_odds.position == 7:
p = [598,337]
screen.blit(special_odds, p)
p = [548,336]
screen.blit(seven_odds, p)
if s.game.special_odds.position == 8:
p = [598,308]
screen.blit(special_odds, p)
p = [548,308]
screen.blit(seven_odds, p)
if s.game.special_odds.position == 9:
p = [599,278]
screen.blit(special_odds, p)
p = [548,279]
screen.blit(seven_odds, p)
if s.game.special_odds.position > 0:
if s.game.special_replay_counter.position > 0:
p = [608,732]
screen.blit(collected, p)
if s.game.ball_count.position < 3:
p = [531,731]
screen.blit(collected, p)
if s.game.special_game.position == 2:
p = [598,540]
screen.blit(ball, p)
p = [608,635]
screen.blit(collected, p)
if s.game.special_game.position == 3:
p = [626,540]
screen.blit(ball, p)
p = [608,635]
screen.blit(collected, p)
if s.game.special_game.position == 4:
p = [656,540]
screen.blit(ball, p)
p = [608,635]
screen.blit(collected, p)
if s.game.missed.status == True:
p = [608,684]
screen.blit(collected, p)
if s.game.twin_number.position == 1:
p = [204,739]
screen.blit(ml_arrow, p)
elif s.game.twin_number.position == 2:
p = [236,738]
screen.blit(ml_arrow, p)
elif s.game.twin_number.position == 3:
p = [269,738]
screen.blit(ml_arrow, p)
if s.game.twin_number.position >= 4:
if s.game.twelve.status == True:
p = [300,728]
screen.blit(twin_number, p)
if s.game.eight.status == True:
p = [300,752]
screen.blit(twin_number, p)
if s.game.twin_number.position == 5:
p = [370,739]
screen.blit(ml_arrow, p)
elif s.game.twin_number.position == 6:
p = [400,739]
screen.blit(ml_arrow, p)
elif s.game.twin_number.position == 7:
p = [430,739]
screen.blit(ml_arrow, p)
if s.game.twin_number.position == 8:
if s.game.eight.status == True:
p = [462,730]
screen.blit(twin_number, p)
if s.game.twelve.status == True:
p = [462,752]
screen.blit(twin_number, p)
if s.game.bonus.position == 1:
p = [552,702]
screen.blit(diamond, p)
elif s.game.bonus.position == 2:
p = [535,686]
screen.blit(diamond, p)
elif s.game.bonus.position == 3:
p = [536,660]
screen.blit(diamond, p)
elif s.game.bonus.position == 4:
p = [535,635]
screen.blit(diamond, p)
elif s.game.bonus.position == 5:
p = [535,608]
screen.blit(diamond, p)
elif s.game.bonus.position == 6:
p = [534,584]
screen.blit(diamond, p)
elif s.game.bonus.position == 7:
p = [546,552]
screen.blit(diamond_7, p)
elif s.game.bonus.position == 8:
p = [572,582]
screen.blit(diamond, p)
elif s.game.bonus.position == 9:
p = [573,608]
screen.blit(diamond, p)
elif s.game.bonus.position == 10:
p = [573,634]
screen.blit(diamond, p)
elif s.game.bonus.position == 11:
p = [574,660]
screen.blit(diamond, p)
elif s.game.bonus.position == 12:
p = [574,686]
screen.blit(diamond, p)
pygame.display.update()
def blink(args):
dirty_rects = []
s = args[0]
b = args[1]
sn = args[2]
if b == 0:
if sn == 1:
p = [287,640]
dirty_rects.append(screen.blit(select_now, p))
pygame.display.update(dirty_rects)
else:
dirty_rects.append(screen.blit(bg_gi, (287,640), pygame.Rect(287,640,146,30)))
pygame.display.update(dirty_rects)
b = not b
args = [s,b,sn]
s.delay(name="blink", delay=0.1, handler=blink, param=args)
def line1_animation(args):
dirty_rects = []
s = args[0]
num = args[1]
line = args[2]
if line == 1:
if s.game.line1.position == 0:
dirty_rects.append(screen.blit(columna, (337, 269 - num)))
elif s.game.line1.position == 1:
dirty_rects.append(screen.blit(columna, (337, 318 - num)))
elif s.game.line1.position == 2:
dirty_rects.append(screen.blit(columna, (337, 368 + num)))
elif s.game.line1.position == 3:
dirty_rects.append(screen.blit(columna, (337, 318 + num)))
nc_p = [230,368]
dirty_rects.append(screen.blit(number_card, nc_p))
if (s.game.anti_cheat.status == True):
dirty_rects.append(screen.blit(bg_gi, (224,264), pygame.Rect(224,264,270,408)))
else:
dirty_rects.append(screen.blit(bg_off, (224,264), pygame.Rect(224,264,270,408)))
p = [307,217]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],42,57)))
dirty_rects.append(screen.blit(letter1, p))
p = [346,217]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],55,57)))
dirty_rects.append(screen.blit(letter2, p))
p = [402,217]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],49,57)))
dirty_rects.append(screen.blit(letter3, p))
p = [451,217]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],43,57)))
dirty_rects.append(screen.blit(letter4, p))
p = [497,217]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],54,57)))
dirty_rects.append(screen.blit(letter5, p))
p = [572,217]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],53,57)))
dirty_rects.append(screen.blit(letter6, p))
if s.game.red_odds.position < 5:
p = [307,217]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],42,57)))
dirty_rects.append(screen.blit(letter1, p))
dirty_rects.append(screen.blit(red_letter1, p))
if s.game.red_odds.position in [5,6]:
p = [346,217]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],55,57)))
dirty_rects.append(screen.blit(letter2, p))
dirty_rects.append(screen.blit(red_letter2, p))
if s.game.red_odds.position == 7:
p = [402,217]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],49,57)))
dirty_rects.append(screen.blit(letter3, p))
dirty_rects.append(screen.blit(red_letter3, p))
if s.game.red_odds.position == 8:
p = [451,217]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],43,57)))
dirty_rects.append(screen.blit(letter4, p))
dirty_rects.append(screen.blit(red_letter4, p))
if s.game.red_odds.position == 9:
p = [497,217]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],54,57)))
dirty_rects.append(screen.blit(letter5, p))
dirty_rects.append(screen.blit(red_letter5, p))
if s.game.red_odds.position == 10:
p = [572,217]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],53,57)))
dirty_rects.append(screen.blit(letter6, p))
dirty_rects.append(screen.blit(red_letter6, p))
if s.game.mystic_lines.position >= 4:
p = [335,591]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],49,48)))
dirty_rects.append(screen.blit(ml_letter, p))
if s.game.mystic_lines.position >= 7:
p = [262,591]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],49,48)))
dirty_rects.append(screen.blit(ml_letter, p))
if s.game.mystic_lines.position == 10:
p = [410,591]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],49,48)))
dirty_rects.append(screen.blit(ml_letter, p))
pygame.display.update(dirty_rects)
def line2_animation(args):
dirty_rects = []
s = args[0]
num = args[1]
line = args[2]
if line == 2:
if s.game.line2.position == 0:
dirty_rects.append(screen.blit(columnb2, (233 - num, 369)))
dirty_rects.append(screen.blit(columnb1, (286 + num, 369)))
elif s.game.line2.position == 1:
dirty_rects.append(screen.blit(columnb1, (233 - num, 369)))
dirty_rects.append(screen.blit(columnb2, (286 + num, 369)))
nc_p = [230,368]
dirty_rects.append(screen.blit(number_card, nc_p))
if (s.game.anti_cheat.status == True):
dirty_rects.append(screen.blit(bg_gi, (233,369), pygame.Rect(233,369,270,212)))
else:
dirty_rects.append(screen.blit(bg_off, (233,369), pygame.Rect(233,369,270,212)))
if s.game.mystic_lines.position >= 4:
p = [335,591]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],49,48)))
dirty_rects.append(screen.blit(ml_letter, p))
if s.game.mystic_lines.position >= 7:
p = [262,591]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],49,48)))
dirty_rects.append(screen.blit(ml_letter, p))
if s.game.mystic_lines.position == 10:
p = [410,591]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],49,48)))
dirty_rects.append(screen.blit(ml_letter, p))
pygame.display.update(dirty_rects)
def line3_animation(args):
dirty_rects = []
s = args[0]
num = args[1]
line = args[2]
if line == 3:
if s.game.line3.position == 0:
dirty_rects.append(screen.blit(columnc2, (389 - num, 369)))
dirty_rects.append(screen.blit(columnc1, (440 + num, 369)))
elif s.game.line3.position == 1:
dirty_rects.append(screen.blit(columnc1, (389 - num, 369)))
dirty_rects.append(screen.blit(columnc2, (440 + num, 369)))
nc_p = [230,368]
dirty_rects.append(screen.blit(number_card, nc_p))
if (s.game.anti_cheat.status == True):
dirty_rects.append(screen.blit(bg_gi, (230,369), pygame.Rect(230,369,273,212)))
else:
dirty_rects.append(screen.blit(bg_off, (230,369), pygame.Rect(230,369,273,212)))
if s.game.mystic_lines.position >= 4:
p = [335,591]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],49,48)))
dirty_rects.append(screen.blit(ml_letter, p))
if s.game.mystic_lines.position >= 7:
p = [262,591]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],49,48)))
dirty_rects.append(screen.blit(ml_letter, p))
if s.game.mystic_lines.position == 10:
p = [410,591]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],49,48)))
dirty_rects.append(screen.blit(ml_letter, p))
pygame.display.update(dirty_rects)
def eb_animation(args):
global screen
dirty_rects = []
s = args[0]
num = args[1]
if s.game.extra_ball.position < 1:
dirty_rects.append(screen.blit(bg_gi, (150,1040), pygame.Rect(150,1040,47,31)))
if s.game.extra_ball.position < 2:
dirty_rects.append(screen.blit(bg_gi, (201,1040), pygame.Rect(201,1040,59,34)))
if s.game.extra_ball.position < 3:
dirty_rects.append(screen.blit(bg_gi, (262,1040), pygame.Rect(262,1040,59,34)))
if s.game.extra_ball.position < 4:
dirty_rects.append(screen.blit(bg_gi, (323,1040), pygame.Rect(323,1040,47,31)))
if s.game.extra_ball.position < 5:
dirty_rects.append(screen.blit(bg_gi, (374,1040), pygame.Rect(374,1040,59,34)))
if s.game.extra_ball.position < 6:
dirty_rects.append(screen.blit(bg_gi, (436,1040), pygame.Rect(436,1040,59,34)))
if s.game.extra_ball.position < 7:
dirty_rects.append(screen.blit(bg_gi, (498,1040), pygame.Rect(498,1040,47,31)))
if s.game.extra_ball.position < 8:
dirty_rects.append(screen.blit(bg_gi, (548,1040), pygame.Rect(548,1040,59,34)))
if s.game.extra_ball.position < 9:
dirty_rects.append(screen.blit(bg_gi, (610,1040), pygame.Rect(610,1040,59,34)))
pygame.display.update(dirty_rects)
if num in [0,25,14,49]:
if s.game.extra_ball.position < 1:
p = [150,1040]
dirty_rects.append(screen.blit(eb_number, p))
pygame.display.update(dirty_rects)
return
elif num in [39,1,26,15]:
if s.game.extra_ball.position < 2:
p = [201,1040]
dirty_rects.append(screen.blit(eb, p))
pygame.display.update(dirty_rects)
return
elif num in [3,4,17,28,29,40]:
if s.game.extra_ball.position < 3:
p = [262,1040]
dirty_rects.append(screen.blit(eb, p))
pygame.display.update(dirty_rects)
return
elif num in [5,18,30,43]:
if s.game.extra_ball.position < 4:
p = [323,1040]
dirty_rects.append(screen.blit(eb_number, p))
pygame.display.update(dirty_rects)
return
elif num in [7,8,19,32,33,44]:
if s.game.extra_ball.position < 5:
p = [374,1040]
dirty_rects.append(screen.blit(eb, p))
pygame.display.update(dirty_rects)
return
elif num in [9,10,20,34,35,45]:
if s.game.extra_ball.position < 6:
p = [436,1040]
dirty_rects.append(screen.blit(eb, p))
pygame.display.update(dirty_rects)
return
elif num in [11,21,36,46]:
if s.game.extra_ball.position < 7:
p = [498,1040]
dirty_rects.append(screen.blit(eb_number, p))
pygame.display.update(dirty_rects)
return
elif num in [12,22,37,47]:
if s.game.extra_ball.position < 8:
p = [548,1040]
dirty_rects.append(screen.blit(eb, p))
pygame.display.update(dirty_rects)
return
elif num in [2,6,13,16,23,27,31,38,41,48]:
if s.game.extra_ball.position < 9:
p = [610,1040]
dirty_rects.append(screen.blit(eb, p))
pygame.display.update(dirty_rects)
return
def clear_odds(s, num):
global screen
dirty_rects = []
if s.game.double_red.status == False:
dirty_rects.append(screen.blit(bg_gi, (20,610), pygame.Rect(20,610,74,74)))
if s.game.double_yellow.status == False:
dirty_rects.append(screen.blit(bg_gi, (94,610), pygame.Rect(94,610,74,74)))
if s.game.double_green.status == False:
dirty_rects.append(screen.blit(bg_gi, (20,683), pygame.Rect(20,683,74,74)))
if s.game.double_blue.status == False:
dirty_rects.append(screen.blit(bg_gi, (94,683), pygame.Rect(94,683,74,74)))
if s.game.yellow_odds.position != 2:
dirty_rects.append(screen.blit(bg_gi, (230,907), pygame.Rect(230,907,46,61)))
if s.game.yellow_odds.position != 5:
dirty_rects.append(screen.blit(bg_gi, (343,907), pygame.Rect(343,907,46,61)))
if s.game.yellow_odds.position != 6:
dirty_rects.append(screen.blit(bg_gi, (385,907), pygame.Rect(385,907,46,61)))
if s.game.yellow_odds.position != 7:
dirty_rects.append(screen.blit(bg_gi, (436,907), pygame.Rect(436,907,46,61)))
if s.game.yellow_odds.position != 8:
dirty_rects.append(screen.blit(bg_gi, (483,907), pygame.Rect(483,907,46,61)))
if s.game.yellow_odds.position != 9:
dirty_rects.append(screen.blit(bg_gi, (530,907), pygame.Rect(530,907,46,61)))
if s.game.yellow_odds.position != 10:
dirty_rects.append(screen.blit(bg_gi, (578,907), pygame.Rect(578,907,46,61)))
if s.game.red_odds.position != 3:
dirty_rects.append(screen.blit(bg_gi, (267,783), pygame.Rect(267,783,46,61)))
if s.game.red_odds.position != 4:
dirty_rects.append(screen.blit(bg_gi, (305,783), pygame.Rect(305,783,46,61)))
if s.game.red_odds.position != 6:
dirty_rects.append(screen.blit(bg_gi, (385,783), pygame.Rect(385,783,46,61)))
if s.game.red_odds.position != 7:
dirty_rects.append(screen.blit(bg_gi, (436,783), pygame.Rect(436,783,46,61)))
if s.game.red_odds.position != 8:
dirty_rects.append(screen.blit(bg_gi, (483,783), pygame.Rect(483,783,46,61)))
if s.game.red_odds.position != 9:
dirty_rects.append(screen.blit(bg_gi, (530,783), pygame.Rect(530,783,46,61)))
if s.game.red_odds.position != 10:
dirty_rects.append(screen.blit(bg_gi, (578,783), pygame.Rect(578,783,46,61)))
if s.game.blue_odds.position != 2:
dirty_rects.append(screen.blit(bg_gi, (230,973), pygame.Rect(230,973,46,61)))
dirty_rects.append(screen.blit(bg_gi, (230,843), pygame.Rect(230,843,46,61)))
if s.game.blue_odds.position != 5:
dirty_rects.append(screen.blit(bg_gi, (343,973), pygame.Rect(343,973,46,61)))
dirty_rects.append(screen.blit(bg_gi, (343,843), pygame.Rect(343,843,46,61)))
if s.game.blue_odds.position != 7:
dirty_rects.append(screen.blit(bg_gi, (436,973), pygame.Rect(436,973,46,61)))
dirty_rects.append(screen.blit(bg_gi, (436,843), pygame.Rect(436,843,46,61)))
if s.game.blue_odds.position != 8:
dirty_rects.append(screen.blit(bg_gi, (483,973), pygame.Rect(483,973,46,61)))
dirty_rects.append(screen.blit(bg_gi, (483,843), pygame.Rect(483,843,46,61)))
if s.game.blue_odds.position != 9:
dirty_rects.append(screen.blit(bg_gi, (530,973), pygame.Rect(530,973,46,61)))
dirty_rects.append(screen.blit(bg_gi, (530,843), pygame.Rect(530,843,46,61)))
if s.game.blue_odds.position != 10:
dirty_rects.append(screen.blit(bg_gi, (578,973), pygame.Rect(578,973,46,61)))
dirty_rects.append(screen.blit(bg_gi, (578,843), pygame.Rect(578,843,46,61)))
pygame.display.update(dirty_rects)
def draw_odds_animation(s, num):
global screen
dirty_rects = []
if num in [7,32]:
if s.game.double_red.status == False:
p = [20,610]
dirty_rects.append(screen.blit(red_double, p))
pygame.display.update(dirty_rects)
return
if num in [15,40]:
if s.game.double_yellow.status == False:
p = [94,608]
dirty_rects.append(screen.blit(yellow_double, p))
pygame.display.update(dirty_rects)
return
if num in [0,25]:
if s.game.double_green.status == False:
p = [20,683]
dirty_rects.append(screen.blit(green_double, p))
pygame.display.update(dirty_rects)
return
if num in [9,34]:
if s.game.double_blue.status == False:
p = [94,683]
dirty_rects.append(screen.blit(blue_double, p))
pygame.display.update(dirty_rects)
return
if num in [22,47]:
if s.game.yellow_odds.position != 2:
p = [230,907]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [13,38]:
if s.game.yellow_odds.position != 5:
p = [343,907]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [20,45]:
if s.game.yellow_odds.position != 6:
p = [385,907]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [20,45]:
if s.game.yellow_odds.position != 7:
p = [436,907]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [12,37]:
if s.game.yellow_odds.position != 8:
p = [483,907]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [7,32]:
if s.game.yellow_odds.position != 9:
p = [530,907]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [21,46]:
if s.game.yellow_odds.position != 10:
p = [578,907]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [2,27]:
if s.game.red_odds.position != 3:
p = [267,783]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [10,35]:
if s.game.red_odds.position != 4:
p = [305,783]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [12,37]:
if s.game.red_odds.position != 6:
p = [385,783]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [6,31]:
if s.game.red_odds.position != 7:
p = [436,783]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [14,39]:
if s.game.red_odds.position != 8:
p = [483,783]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [1,26]:
if s.game.red_odds.position != 9:
p = [530,783]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [20,45]:
if s.game.red_odds.position != 10:
p = [578,783]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [8,33]:
if s.game.blue_odds.position != 2:
p = [230,973]
dirty_rects.append(screen.blit(odds, p))
p = [230,843]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [23,48]:
if s.game.blue_odds.position != 5:
p = [343,973]
dirty_rects.append(screen.blit(odds, p))
p = [343,843]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [3,28]:
if s.game.blue_odds.position != 7:
p = [436,973]
dirty_rects.append(screen.blit(odds, p))
p = [436,843]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
if num in [18,43]:
if s.game.blue_odds.position != 8:
p = [483,973]
dirty_rects.append(screen.blit(odds, p))
p = [483,843]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [5,30]:
if s.game.blue_odds.position != 9:
p = [530,973]
dirty_rects.append(screen.blit(odds, p))
p = [530,843]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [16,41]:
if s.game.blue_odds.position != 10:
p = [578,973]
dirty_rects.append(screen.blit(odds, p))
p = [578,843]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
def odds_animation(args):
global screen
dirty_rects = []
s = args[0]
num = args[1]
clear_odds(s, num)
draw_odds_animation(s, num)
def clear_features(s, num):
global screen
dirty_rects = []
if s.game.selection_feature.position > 7:
dirty_rects.append(screen.blit(bg_gi, (18,548), pygame.Rect(18,548,148,48)))
if s.game.mystic_lines.position < 4 and s.game.two_red_letter.status == False and s.game.three_red_letter.status == False:
dirty_rects.append(screen.blit(bg_gi, (18,548), pygame.Rect(18,548,148,48)))
if s.game.selection_feature.position not in [7,8]:
dirty_rects.append(screen.blit(bg_gi, (19,416), pygame.Rect(19,416,148,48)))
if s.game.mystic_lines.position < 4 and s.game.two_red_letter.status == False and s.game.three_red_letter.status == False:
dirty_rects.append(screen.blit(bg_gi, (19,416), pygame.Rect(19,416,148,48)))
if s.game.selection_feature.position < 9:
dirty_rects.append(screen.blit(bg_gi, (18,372), pygame.Rect(18,372,148,48)))
if s.game.mystic_lines.position < 4 and s.game.two_red_letter.status == False and s.game.three_red_letter.status == False:
dirty_rects.append(screen.blit(bg_gi, (18,372), pygame.Rect(18,372,148,48)))
if s.game.yellow_star.status == False:
dirty_rects.append(screen.blit(bg_gi, (18,504), pygame.Rect(18,504,148,48)))
if s.game.red_star.status == False:
dirty_rects.append(screen.blit(bg_gi, (18,460), pygame.Rect(18,460,148,48)))
if s.game.two_red_letter.status == False:
dirty_rects.append(screen.blit(bg_gi, (92,220), pygame.Rect(92,220,76,41)))
if s.game.three_red_letter.status == False:
dirty_rects.append(screen.blit(bg_gi, (18,219), pygame.Rect(18,219,76,41)))
if s.game.three_stars.status == False:
dirty_rects.append(screen.blit(bg_gi, (18,334), pygame.Rect(18,334,77,27)))
if s.game.six_stars.status == False:
dirty_rects.append(screen.blit(bg_gi, (92,334), pygame.Rect(92,334,77,27)))
if s.game.mystic_lines.position != 2:
dirty_rects.append(screen.blit(bg_gi, (236,680), pygame.Rect(236,680,29,29)))
if s.game.mystic_lines.position < 4:
dirty_rects.append(screen.blit(bg_gi, (335,591), pygame.Rect(335,591,49,48)))
if s.game.mystic_lines.position != 5:
dirty_rects.append(screen.blit(bg_gi, (334,680), pygame.Rect(334,680,29,29)))
if s.game.mystic_lines.position < 7:
dirty_rects.append(screen.blit(bg_gi, (262,591), pygame.Rect(262,591,49,48)))
if s.game.mystic_lines.position != 9:
dirty_rects.append(screen.blit(bg_gi, (459,680), pygame.Rect(459,680,29,29)))
if s.game.mystic_lines.position < 10:
dirty_rects.append(screen.blit(bg_gi, (410,591), pygame.Rect(410,591,49,48)))
pygame.display.update(dirty_rects)
def draw_feature_animation(s, num):
global screen
dirty_rects = []
if num in [10,35]:
if s.game.selection_feature.position not in [1,2,3,4,5,6] and (s.game.mystic_lines.position < 4 and s.game.two_red_letter.status == False and s.game.three_red_letter.status == False):
p = [18,548]
dirty_rects.append(screen.blit(time, p))
pygame.display.update(dirty_rects)
return
if num in [9,34]:
if s.game.selection_feature.position not in [7,8] and (s.game.mystic_lines.position < 4 and s.game.two_red_letter.status == False and s.game.three_red_letter.status == False):
p = [19,416]
dirty_rects.append(screen.blit(time, p))
pygame.display.update(dirty_rects)
return
if num in [6,31]:
if s.game.selection_feature.position not in [9] and (s.game.mystic_lines.position < 4 and s.game.two_red_letter.status == False and s.game.three_red_letter.status == False):
p = [18,372]
dirty_rects.append(screen.blit(time, p))
pygame.display.update(dirty_rects)
return
if num in [11,36]:
if s.game.red_star.status == False:
p = [18,460]
dirty_rects.append(screen.blit(time, p))
pygame.display.update(dirty_rects)
s.game.coils.redROLamp.pulse(85)
return
if num in [4,29]:
if s.game.yellow_star.status == False:
p = [18,504]
dirty_rects.append(screen.blit(time, p))
pygame.display.update(dirty_rects)
s.game.coils.yellowROLamp.pulse(85)
return
if num in [13,38]:
if s.game.three_red_letter.status == False:
p = [18,219]
dirty_rects.append(screen.blit(three_red, p))
pygame.display.update(dirty_rects)
return
if num in [44,19]:
if s.game.two_red_letter.status == False:
p = [92,220]
dirty_rects.append(screen.blit(two_red, p))
pygame.display.update(dirty_rects)
return
if num in [16,41]:
if s.game.three_stars.status == False:
p = [18,334]
dirty_rects.append(screen.blit(three_stars, p))
pygame.display.update(dirty_rects)
return
if num in [4,29]:
if s.game.six_stars.status == False:
p = [92,334]
dirty_rects.append(screen.blit(six_stars, p))
pygame.display.update(dirty_rects)
return
if num in [5,30]:
if s.game.mystic_lines.position != 2:
p = [236,680]
dirty_rects.append(screen.blit(ml_arrow, p))
pygame.display.update(dirty_rects)
return
if num in [23,48]:
if s.game.mystic_lines.position < 4:
p = [335,591]
dirty_rects.append(screen.blit(ml_letter, p))
pygame.display.update(dirty_rects)
return
if num in [0,25]:
if s.game.mystic_lines.position != 5:
p = [334,680]
dirty_rects.append(screen.blit(ml_arrow, p))
pygame.display.update(dirty_rects)
return
if num in [12,37,22,47]:
if s.game.mystic_lines.position < 7:
p = [262,591]
dirty_rects.append(screen.blit(ml_letter, p))
pygame.display.update(dirty_rects)
return
if num in [9,34]:
if s.game.mystic_lines.position != 9:
p = [459,680]
dirty_rects.append(screen.blit(ml_arrow, p))
pygame.display.update(dirty_rects)
return
if num in [10,35,24,49]:
if s.game.mystic_lines.position < 10:
p = [410,591]
dirty_rects.append(screen.blit(ml_letter, p))
pygame.display.update(dirty_rects)
return
def feature_animation(args):
global screen
dirty_rects = []
s = args[0]
num = args[1]
clear_features(s, num)
draw_feature_animation(s, num)
def both_animation(args):
global screen
dirty_rects = []
s = args[0]
num = args[1]
clear_features(s, num)
clear_odds(s, num)
draw_odds_animation(s, num)
draw_feature_animation(s, num)
def special_animation(args):
global screen
dirty_rects = []
s = args[0]
num = args[1]
if s.game.special_odds.position != 2:
dirty_rects.append(screen.blit(bg_gi, (599,482), pygame.Rect(599,482,90,30)))
dirty_rects.append(screen.blit(bg_gi, (547,482), pygame.Rect(547,482,42,32)))
if s.game.special_odds.position != 3:
dirty_rects.append(screen.blit(bg_gi, (599,453), pygame.Rect(599,453,90,30)))
dirty_rects.append(screen.blit(bg_gi, (547,452), pygame.Rect(547,452,42,32)))
if s.game.special_odds.position != 4:
dirty_rects.append(screen.blit(bg_gi, (599,424), pygame.Rect(599,424,90,30)))
dirty_rects.append(screen.blit(bg_gi, (547,424), pygame.Rect(547,424,42,32)))
if s.game.special_odds.position != 5:
dirty_rects.append(screen.blit(bg_gi, (599,395), pygame.Rect(599,395,90,30)))
dirty_rects.append(screen.blit(bg_gi, (547,394), pygame.Rect(547,394,42,32)))
if s.game.special_odds.position != 6:
dirty_rects.append(screen.blit(bg_gi, (598,366), pygame.Rect(598,366,90,30)))
dirty_rects.append(screen.blit(bg_gi, (547,366), pygame.Rect(547,366,42,32)))
if s.game.special_odds.position != 7:
dirty_rects.append(screen.blit(bg_gi, (598,337), pygame.Rect(598,337,90,30)))
dirty_rects.append(screen.blit(bg_gi, (548,336), pygame.Rect(548,336,42,32)))
if s.game.special_odds.position != 8:
dirty_rects.append(screen.blit(bg_gi, (598,308), pygame.Rect(598,308,90,30)))
dirty_rects.append(screen.blit(bg_gi, (548,308), pygame.Rect(548,308,42,32)))
if s.game.special_odds.position != 9:
dirty_rects.append(screen.blit(bg_gi, (599,278), pygame.Rect(599,278,90,30)))
dirty_rects.append(screen.blit(bg_gi, (548,279), pygame.Rect(548,279,42,32)))
pygame.display.update(dirty_rects)
if num in [18,19,43,44]:
if s.game.special_odds.position < 2:
p = [599,482]
dirty_rects.append(screen.blit(special_odds, p))
p = [547,482]
dirty_rects.append(screen.blit(seven_odds, p))
pygame.display.update(dirty_rects)
return
if num in [20,21,45,46]:
if s.game.special_odds.position < 3:
p = [599,453]
dirty_rects.append(screen.blit(special_odds, p))
p = [547,452]
dirty_rects.append(screen.blit(seven_odds, p))
pygame.display.update(dirty_rects)
return
if num in [14,15,39,40]:
if s.game.special_odds.position < 4:
p = [599,424]
dirty_rects.append(screen.blit(special_odds, p))
p = [547,424]
dirty_rects.append(screen.blit(seven_odds, p))
pygame.display.update(dirty_rects)
return
if num in [16,17,41,42]:
if s.game.special_odds.position < 5:
p = [599,395]
dirty_rects.append(screen.blit(special_odds, p))
p = [547,394]
dirty_rects.append(screen.blit(seven_odds, p))
pygame.display.update(dirty_rects)
return
if num in [6,7,10,11,31,32,35,36]:
if s.game.special_odds.position < 6:
p = [598,366]
dirty_rects.append(screen.blit(special_odds, p))
p = [547,366]
dirty_rects.append(screen.blit(seven_odds, p))
pygame.display.update(dirty_rects)
return
if num in [4,5,12,13,29,30,37,38]:
if s.game.special_odds.position < 7:
p = [598,337]
dirty_rects.append(screen.blit(special_odds, p))
p = [547,336]
dirty_rects.append(screen.blit(seven_odds, p))
pygame.display.update(dirty_rects)
return
if num in [0,1,2,3,8,9,25,26,27,28,33,34]:
if s.game.special_odds.position < 8:
p = [598,308]
dirty_rects.append(screen.blit(special_odds, p))
p = [547,308]
dirty_rects.append(screen.blit(seven_odds, p))
pygame.display.update(dirty_rects)
return
if num in [22,23,47,48]:
if s.game.special_odds.position < 9:
p = [599,278]
dirty_rects.append(screen.blit(special_odds, p))
p = [548,279]
dirty_rects.append(screen.blit(seven_odds, p))
pygame.display.update(dirty_rects)
return
| gpl-3.0 |
luotao1/Paddle | python/paddle/fluid/tests/unittests/test_ir_memory_optimize_pass.py | 2 | 3516 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from parallel_executor_test_base import TestParallelExecutorBase, DeviceType
import paddle.fluid as fluid
import paddle.fluid.core as core
import numpy as np
import paddle
import paddle.dataset.mnist as mnist
import unittest
import os
def _feed_data_helper():
img = fluid.layers.data(name='image', shape=[784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
return img, label
def simple_fc_net(use_feed):
assert use_feed
x, y = _feed_data_helper()
hidden_layer = 4
for _ in range(hidden_layer):
x = fluid.layers.fc(input=x, size=20, act='relu')
y_predict = fluid.layers.fc(input=x, size=10, act='softmax')
cost = fluid.layers.cross_entropy(input=y_predict, label=y)
avg_cost = fluid.layers.mean(cost)
return avg_cost
def fc_with_inplace_net(use_feed):
assert use_feed
x, y = _feed_data_helper()
fc = fluid.layers.fc(input=x, size=20, act='relu')
fc = fluid.layers.fc(input=fc, size=10, act='relu')
reshape = fluid.layers.reshape(x=fc, shape=[-1, 2, 5])
reshape = fluid.layers.reshape(x=reshape, shape=[-1, 5, 2])
y_predict = fluid.layers.fc(input=reshape, size=10, act='softmax')
cost = fluid.layers.cross_entropy(input=y_predict, label=y)
avg_cost = fluid.layers.mean(cost)
return avg_cost
class TestMNIST(TestParallelExecutorBase):
def _dummy_data(self):
np.random.seed(5)
img = np.random.random(size=[32, 784]).astype(np.float32)
label = np.ones(shape=[32, 1], dtype='int64')
return img, label
def _compare_ir_memory_optimize(self, model, use_device):
if use_device == DeviceType.CUDA and not core.is_compiled_with_cuda():
return
img, label = self._dummy_data()
first_loss0, last_loss0 = self.check_network_convergence(
model,
feed_dict={"image": img,
"label": label},
use_device=use_device,
use_ir_memory_optimize=False)
first_loss1, last_loss1 = self.check_network_convergence(
model,
feed_dict={"image": img,
"label": label},
use_device=use_device,
use_ir_memory_optimize=True)
for loss in zip(first_loss0, first_loss1):
self.assertAlmostEqual(loss[0], loss[1], delta=1e-6)
for loss in zip(last_loss0, last_loss1):
self.assertAlmostEqual(loss[0], loss[1], delta=1e-6)
def test_simple_fc_net(self):
self._compare_ir_memory_optimize(simple_fc_net, DeviceType.CPU)
self._compare_ir_memory_optimize(simple_fc_net, DeviceType.CUDA)
def test_fc_with_reshape_net(self):
self._compare_ir_memory_optimize(fc_with_inplace_net, DeviceType.CPU)
self._compare_ir_memory_optimize(fc_with_inplace_net, DeviceType.CUDA)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
namccart/gnuradio | gr-digital/examples/example_costas.py | 49 | 5316 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_costas(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
data = 2.0*scipy.random.randint(0, 2, N) - 1.0
data = scipy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.cst = digital.costas_loop_cc(bw, 2)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_cst = blocks.vector_sink_c()
self.vsnk_frq = blocks.vector_sink_f()
self.connect(self.src, self.rrc, self.chn, self.cst, self.vsnk_cst)
self.connect(self.rrc, self.vsnk_src)
self.connect((self.cst,1), self.vsnk_frq)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=2000,
help="Set the number of samples to process [default=%default]")
parser.add_option("-S", "--sps", type="int", default=4,
help="Set the samples per symbol [default=%default]")
parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
help="Set the rolloff factor [default=%default]")
parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
help="Set the loop bandwidth [default=%default]")
parser.add_option("-n", "--ntaps", type="int", default=45,
help="Set the number of taps in the filters [default=%default]")
parser.add_option("", "--noise", type="eng_float", default=0.0,
help="Set the simulation noise voltage [default=%default]")
parser.add_option("-f", "--foffset", type="eng_float", default=0.0,
help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
help="Set the simulation's timing offset [default=%default]")
parser.add_option("-p", "--poffset", type="eng_float", default=0.707,
help="Set the simulation's phase offset [default=%default]")
(options, args) = parser.parse_args ()
# Adjust N for the interpolation by sps
options.nsamples = options.nsamples // options.sps
# Set up the program-under-test
put = example_costas(options.nsamples, options.sps, options.rolloff,
options.ntaps, options.bandwidth, options.noise,
options.foffset, options.toffset, options.poffset)
put.run()
data_src = scipy.array(put.vsnk_src.data())
# Convert the Costas loop's LO frequency from rad/sample to cycles/sample (normalized Hz)
data_frq = scipy.array(put.vsnk_frq.data()) / (2.0*scipy.pi)
# adjust this to align with the data.
data_cst = scipy.array(3*[0,]+list(put.vsnk_cst.data()))
# Plot the Costas loop's LO frequency
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
s1 = f1.add_subplot(2,2,1)
s1.plot(data_frq)
s1.set_title("Costas LO")
s1.set_xlabel("Samples")
s1.set_ylabel("Frequency (normalized Hz)")
# Plot the IQ symbols
s3 = f1.add_subplot(2,2,2)
s3.plot(data_src.real, data_src.imag, "o")
s3.plot(data_cst.real, data_cst.imag, "rx")
s3.set_title("IQ")
s3.set_xlabel("Real part")
s3.set_ylabel("Imag part")
s3.set_xlim([-2, 2])
s3.set_ylim([-2, 2])
# Plot the symbols in time
s4 = f1.add_subplot(2,2,3)
s4.set_position([0.125, 0.05, 0.775, 0.4])
s4.plot(data_src.real, "o-")
s4.plot(data_cst.real, "rx-")
s4.set_title("Symbols")
s4.set_xlabel("Samples")
s4.set_ylabel("Real Part of Signals")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
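# A plausible way to run this example from the command line (the option
# values below are arbitrary illustrations, not the script's defaults):
#
#   ./example_costas.py -N 4000 -S 4 --noise 0.2 -f 0.002 -p 0.707
#
# which simulates a noisy, frequency- and phase-offset BPSK signal and then
# plots the Costas loop's LO frequency together with the recovered symbols.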
| gpl-3.0 |
wskplho/fontuley | src/third_party/fontTools/Lib/fontTools/ttLib/__init__.py | 2 | 30232 | """fontTools.ttLib -- a package for dealing with TrueType fonts.
This package offers translators to convert TrueType fonts to Python
objects and vice versa, and additionally from Python to TTX (an XML-based
text format) and vice versa.
Example interactive session:
Python 1.5.2c1 (#43, Mar 9 1999, 13:06:43) [CW PPC w/GUSI w/MSL]
Copyright 1991-1995 Stichting Mathematisch Centrum, Amsterdam
>>> from fontTools import ttLib
>>> tt = ttLib.TTFont("afont.ttf")
>>> tt['maxp'].numGlyphs
242
>>> tt['OS/2'].achVendID
'B&H\000'
>>> tt['head'].unitsPerEm
2048
>>> tt.saveXML("afont.ttx")
Dumping 'LTSH' table...
Dumping 'OS/2' table...
Dumping 'VDMX' table...
Dumping 'cmap' table...
Dumping 'cvt ' table...
Dumping 'fpgm' table...
Dumping 'glyf' table...
Dumping 'hdmx' table...
Dumping 'head' table...
Dumping 'hhea' table...
Dumping 'hmtx' table...
Dumping 'loca' table...
Dumping 'maxp' table...
Dumping 'name' table...
Dumping 'post' table...
Dumping 'prep' table...
>>> tt2 = ttLib.TTFont()
>>> tt2.importXML("afont.ttx")
>>> tt2['maxp'].numGlyphs
242
>>>
"""
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
import os
import sys
haveMacSupport = 0
if sys.platform == "mac":
haveMacSupport = 1
elif sys.platform == "darwin" and sys.version_info[:3] != (2, 2, 0):
# Python 2.2's Mac support is broken, so don't enable it there.
haveMacSupport = 1
class TTLibError(Exception): pass
class TTFont(object):
"""The main font object. It manages file input and output, and offers
a convenient way of accessing tables.
Tables will be only decompiled when necessary, ie. when they're actually
accessed. This means that simple operations can be extremely fast.
"""
def __init__(self, file=None, res_name_or_index=None,
sfntVersion="\000\001\000\000", flavor=None, checkChecksums=False,
verbose=False, recalcBBoxes=True, allowVID=False, ignoreDecompileErrors=False,
recalcTimestamp=True, fontNumber=-1, lazy=False, quiet=False):
"""The constructor can be called with a few different arguments.
When reading a font from disk, 'file' should be either a pathname
pointing to a file, or a readable file object.
If we're running on a Macintosh, 'res_name_or_index' may be an sfnt
resource name or an sfnt resource index number or zero. The latter
case will cause TTLib to autodetect whether the file is a flat file
or a suitcase. (If it's a suitcase, only the first 'sfnt' resource
will be read!)
The 'checkChecksums' argument is used to specify how sfnt
checksums are treated upon reading a file from disk:
0: don't check (default)
1: check, print warnings if a wrong checksum is found
2: check, raise an exception if a wrong checksum is found.
The TTFont constructor can also be called without a 'file'
argument: this is the way to create a new empty font.
In this case you can optionally supply the 'sfntVersion' argument,
and a 'flavor' which can be None, or 'woff'.
If the recalcBBoxes argument is false, a number of things will *not*
be recalculated upon save/compile:
1) glyph bounding boxes
2) maxp font bounding box
3) hhea min/max values
(1) is needed for certain kinds of CJK fonts (ask Werner Lemberg ;-).
Additionally, upon importing a TTX file, this option causes glyphs
to be compiled right away. This should reduce memory consumption
greatly, and therefore should have some impact on the time needed
to parse/compile large fonts.
If the recalcTimestamp argument is false, the modified timestamp in the
'head' table will *not* be recalculated upon save/compile.
If the allowVID argument is set to true, then virtual GID's are
supported. Asking for a glyph ID with a glyph name or GID that is not in
the font will return a virtual GID. This is valid for GSUB and cmap
tables. For SING glyphlets, the cmap table is used to specify Unicode
values for virtual GIDs used in GSUB/GPOS rules. If the GID N is requested
and does not exist in the font, or the glyphname has the form glyphN
and does not exist in the font, then N is used as the virtual GID.
Else, the first virtual GID is assigned as 0x1000 -1; for subsequent new
virtual GIDs, the next is one less than the previous.
If ignoreDecompileErrors is set to True, exceptions raised in
individual tables during decompilation will be ignored, falling
back to the DefaultTable implementation, which simply keeps the
binary data.
If lazy is set to True, many data structures are loaded lazily, upon
access only.
"""
from fontTools.ttLib import sfnt
self.verbose = verbose
self.quiet = quiet
self.lazy = lazy
self.recalcBBoxes = recalcBBoxes
self.recalcTimestamp = recalcTimestamp
self.tables = {}
self.reader = None
# Permit the user to reference glyphs that are not in the font.
self.last_vid = 0xFFFE # Can't make it be 0xFFFF, as the world is full of unsigned short integer counters that get incremented after the last seen GID value.
self.reverseVIDDict = {}
self.VIDDict = {}
self.allowVID = allowVID
self.ignoreDecompileErrors = ignoreDecompileErrors
if not file:
self.sfntVersion = sfntVersion
self.flavor = flavor
self.flavorData = None
return
if not hasattr(file, "read"):
# assume file is a string
if haveMacSupport and res_name_or_index is not None:
# on the mac, we deal with sfnt resources as well as flat files
from . import macUtils
if res_name_or_index == 0:
if macUtils.getSFNTResIndices(file):
# get the first available sfnt font.
file = macUtils.SFNTResourceReader(file, 1)
else:
file = open(file, "rb")
else:
file = macUtils.SFNTResourceReader(file, res_name_or_index)
else:
file = open(file, "rb")
else:
pass # assume "file" is a readable file object
self.reader = sfnt.SFNTReader(file, checkChecksums, fontNumber=fontNumber)
self.sfntVersion = self.reader.sfntVersion
self.flavor = self.reader.flavor
self.flavorData = self.reader.flavorData
def close(self):
"""If we still have a reader object, close it."""
if self.reader is not None:
self.reader.close()
def save(self, file, makeSuitcase=False, reorderTables=True):
"""Save the font to disk. Similarly to the constructor,
the 'file' argument can be either a pathname or a writable
file object.
On the Mac, if makeSuitcase is true, a suitcase (resource fork)
file will we made instead of a flat .ttf file.
"""
from fontTools.ttLib import sfnt
if not hasattr(file, "write"):
closeStream = 1
if os.name == "mac" and makeSuitcase:
from . import macUtils
file = macUtils.SFNTResourceWriter(file, self)
else:
file = open(file, "wb")
if os.name == "mac":
from fontTools.misc.macCreator import setMacCreatorAndType
setMacCreatorAndType(file.name, 'mdos', 'BINA')
else:
# assume "file" is a writable file object
closeStream = 0
tags = list(self.keys())
if "GlyphOrder" in tags:
tags.remove("GlyphOrder")
numTables = len(tags)
if reorderTables:
import tempfile
tmp = tempfile.TemporaryFile(prefix="ttx-fonttools")
else:
tmp = file
writer = sfnt.SFNTWriter(tmp, numTables, self.sfntVersion, self.flavor, self.flavorData)
done = []
for tag in tags:
self._writeTable(tag, writer, done)
writer.close()
if reorderTables:
tmp.flush()
tmp.seek(0)
reorderFontTables(tmp, file)
tmp.close()
if closeStream:
file.close()
def saveXML(self, fileOrPath, progress=None, quiet=False,
tables=None, skipTables=None, splitTables=False, disassembleInstructions=True,
bitmapGlyphDataFormat='raw'):
"""Export the font as TTX (an XML-based text file), or as a series of text
files when splitTables is true. In the latter case, the 'fileOrPath'
argument should be a path to a directory.
The 'tables' argument must either be false (dump all tables) or a
list of tables to dump. The 'skipTables' argument may be a list of tables
to skip, but only when the 'tables' argument is false.
"""
from fontTools import version
from fontTools.misc import xmlWriter
self.disassembleInstructions = disassembleInstructions
self.bitmapGlyphDataFormat = bitmapGlyphDataFormat
if not tables:
tables = list(self.keys())
if "GlyphOrder" not in tables:
tables = ["GlyphOrder"] + tables
if skipTables:
for tag in skipTables:
if tag in tables:
tables.remove(tag)
numTables = len(tables)
if progress:
progress.set(0, numTables)
idlefunc = getattr(progress, "idle", None)
else:
idlefunc = None
writer = xmlWriter.XMLWriter(fileOrPath, idlefunc=idlefunc)
writer.begintag("ttFont", sfntVersion=repr(self.sfntVersion)[1:-1],
ttLibVersion=version)
writer.newline()
if not splitTables:
writer.newline()
else:
# 'fileOrPath' must now be a path
path, ext = os.path.splitext(fileOrPath)
fileNameTemplate = path + ".%s" + ext
for i in range(numTables):
if progress:
progress.set(i)
tag = tables[i]
if splitTables:
tablePath = fileNameTemplate % tagToIdentifier(tag)
tableWriter = xmlWriter.XMLWriter(tablePath, idlefunc=idlefunc)
tableWriter.begintag("ttFont", ttLibVersion=version)
tableWriter.newline()
tableWriter.newline()
writer.simpletag(tagToXML(tag), src=os.path.basename(tablePath))
writer.newline()
else:
tableWriter = writer
self._tableToXML(tableWriter, tag, progress, quiet)
if splitTables:
tableWriter.endtag("ttFont")
tableWriter.newline()
tableWriter.close()
if progress:
progress.set((i + 1))
writer.endtag("ttFont")
writer.newline()
writer.close()
if self.verbose:
debugmsg("Done dumping TTX")
def _tableToXML(self, writer, tag, progress, quiet):
if tag in self:
table = self[tag]
report = "Dumping '%s' table..." % tag
else:
report = "No '%s' table found." % tag
if progress:
progress.setLabel(report)
elif self.verbose:
debugmsg(report)
else:
if not quiet:
print(report)
if tag not in self:
return
xmlTag = tagToXML(tag)
if hasattr(table, "ERROR"):
writer.begintag(xmlTag, ERROR="decompilation error")
else:
writer.begintag(xmlTag)
writer.newline()
if tag in ("glyf", "CFF "):
table.toXML(writer, self, progress)
else:
table.toXML(writer, self)
writer.endtag(xmlTag)
writer.newline()
writer.newline()
def importXML(self, file, progress=None, quiet=False):
"""Import a TTX file (an XML-based text format), so as to recreate
a font object.
"""
if "maxp" in self and "post" in self:
# Make sure the glyph order is loaded, as it otherwise gets
# lost if the XML doesn't contain the glyph order, yet does
# contain the table which was originally used to extract the
# glyph names from (ie. 'post', 'cmap' or 'CFF ').
self.getGlyphOrder()
from fontTools.misc import xmlReader
reader = xmlReader.XMLReader(file, self, progress, quiet)
reader.read()
def isLoaded(self, tag):
"""Return true if the table identified by 'tag' has been
decompiled and loaded into memory."""
return tag in self.tables
def has_key(self, tag):
if self.isLoaded(tag):
return True
elif self.reader and tag in self.reader:
return True
elif tag == "GlyphOrder":
return True
else:
return False
__contains__ = has_key
def keys(self):
keys = list(self.tables.keys())
if self.reader:
for key in list(self.reader.keys()):
if key not in keys:
keys.append(key)
if "GlyphOrder" in keys:
keys.remove("GlyphOrder")
keys = sortedTagList(keys)
return ["GlyphOrder"] + keys
def __len__(self):
return len(list(self.keys()))
def __getitem__(self, tag):
tag = Tag(tag)
try:
return self.tables[tag]
except KeyError:
if tag == "GlyphOrder":
table = GlyphOrder(tag)
self.tables[tag] = table
return table
if self.reader is not None:
import traceback
if self.verbose:
debugmsg("Reading '%s' table from disk" % tag)
data = self.reader[tag]
tableClass = getTableClass(tag)
table = tableClass(tag)
self.tables[tag] = table
if self.verbose:
debugmsg("Decompiling '%s' table" % tag)
try:
table.decompile(data, self)
except:
if not self.ignoreDecompileErrors:
raise
# fall back to DefaultTable, retaining the binary table data
print("An exception occurred during the decompilation of the '%s' table" % tag)
from .tables.DefaultTable import DefaultTable
file = StringIO()
traceback.print_exc(file=file)
table = DefaultTable(tag)
table.ERROR = file.getvalue()
self.tables[tag] = table
table.decompile(data, self)
return table
else:
raise KeyError("'%s' table not found" % tag)
def __setitem__(self, tag, table):
self.tables[Tag(tag)] = table
def __delitem__(self, tag):
if tag not in self:
raise KeyError("'%s' table not found" % tag)
if tag in self.tables:
del self.tables[tag]
if self.reader and tag in self.reader:
del self.reader[tag]
def get(self, tag, default=None):
try:
return self[tag]
except KeyError:
return default
def setGlyphOrder(self, glyphOrder):
self.glyphOrder = glyphOrder
def getGlyphOrder(self):
try:
return self.glyphOrder
except AttributeError:
pass
if 'CFF ' in self:
cff = self['CFF ']
self.glyphOrder = cff.getGlyphOrder()
elif 'post' in self:
# TrueType font
glyphOrder = self['post'].getGlyphOrder()
if glyphOrder is None:
#
# No names found in the 'post' table.
# Try to create glyph names from the unicode cmap (if available)
# in combination with the Adobe Glyph List (AGL).
#
self._getGlyphNamesFromCmap()
else:
self.glyphOrder = glyphOrder
else:
self._getGlyphNamesFromCmap()
return self.glyphOrder
def _getGlyphNamesFromCmap(self):
#
# This is rather convoluted, but then again, it's an interesting problem:
# - we need to use the unicode values found in the cmap table to
# build glyph names (eg. because there is only a minimal post table,
# or none at all).
# - but the cmap parser also needs glyph names to work with...
# So here's what we do:
# - make up glyph names based on glyphID
# - load a temporary cmap table based on those names
# - extract the unicode values, build the "real" glyph names
# - unload the temporary cmap table
#
if self.isLoaded("cmap"):
# Bootstrapping: we're getting called by the cmap parser
# itself. This means self.tables['cmap'] contains a partially
# loaded cmap, making it impossible to get at a unicode
# subtable here. We remove the partially loaded cmap and
# restore it later.
# This only happens if the cmap table is loaded before any
# other table that does f.getGlyphOrder() or f.getGlyphName().
cmapLoading = self.tables['cmap']
del self.tables['cmap']
else:
cmapLoading = None
# Make up glyph names based on glyphID, which will be used by the
# temporary cmap and by the real cmap in case we don't find a unicode
# cmap.
numGlyphs = int(self['maxp'].numGlyphs)
glyphOrder = [None] * numGlyphs
glyphOrder[0] = ".notdef"
for i in range(1, numGlyphs):
glyphOrder[i] = "glyph%.5d" % i
# Set the glyph order, so the cmap parser has something
# to work with (so we don't get called recursively).
self.glyphOrder = glyphOrder
# Get a (new) temporary cmap (based on the just invented names)
tempcmap = self['cmap'].getcmap(3, 1)
if tempcmap is not None:
# we have a unicode cmap
from fontTools import agl
cmap = tempcmap.cmap
# create a reverse cmap dict
reversecmap = {}
for unicode, name in list(cmap.items()):
reversecmap[name] = unicode
allNames = {}
for i in range(numGlyphs):
tempName = glyphOrder[i]
if tempName in reversecmap:
unicode = reversecmap[tempName]
if unicode in agl.UV2AGL:
# get name from the Adobe Glyph List
glyphName = agl.UV2AGL[unicode]
else:
# create uni<CODE> name
glyphName = "uni%04X" % unicode
tempName = glyphName
n = allNames.get(tempName, 0)
if n:
tempName = glyphName + "#" + str(n)
glyphOrder[i] = tempName
allNames[tempName] = n + 1
# Delete the temporary cmap table from the cache, so it can
# be parsed again with the right names.
del self.tables['cmap']
else:
pass # no unicode cmap available, stick with the invented names
self.glyphOrder = glyphOrder
if cmapLoading:
# restore partially loaded cmap, so it can continue loading
# using the proper names.
self.tables['cmap'] = cmapLoading
def getGlyphNames(self):
"""Get a list of glyph names, sorted alphabetically."""
glyphNames = sorted(self.getGlyphOrder()[:])
return glyphNames
def getGlyphNames2(self):
"""Get a list of glyph names, sorted alphabetically,
but not case sensitive.
"""
from fontTools.misc import textTools
return textTools.caselessSort(self.getGlyphOrder())
def getGlyphName(self, glyphID, requireReal=False):
try:
return self.getGlyphOrder()[glyphID]
except IndexError:
if requireReal or not self.allowVID:
# XXX The ??.W8.otf font that ships with OSX uses higher glyphIDs in
# the cmap table than there are glyphs. I don't think it's legal...
return "glyph%.5d" % glyphID
else:
# user intends virtual GID support
try:
glyphName = self.VIDDict[glyphID]
except KeyError:
glyphName ="glyph%.5d" % glyphID
self.last_vid = min(glyphID, self.last_vid )
self.reverseVIDDict[glyphName] = glyphID
self.VIDDict[glyphID] = glyphName
return glyphName
def getGlyphID(self, glyphName, requireReal=False):
if not hasattr(self, "_reverseGlyphOrderDict"):
self._buildReverseGlyphOrderDict()
glyphOrder = self.getGlyphOrder()
d = self._reverseGlyphOrderDict
if glyphName not in d:
if glyphName in glyphOrder:
self._buildReverseGlyphOrderDict()
return self.getGlyphID(glyphName)
else:
if requireReal:
raise KeyError(glyphName)
elif not self.allowVID:
# Handle glyphXXX only
if glyphName[:5] == "glyph":
try:
return int(glyphName[5:])
except (NameError, ValueError):
raise KeyError(glyphName)
else:
# user intends virtual GID support
try:
glyphID = self.reverseVIDDict[glyphName]
except KeyError:
# if name is in glyphXXX format, use the specified name.
if glyphName[:5] == "glyph":
try:
glyphID = int(glyphName[5:])
except (NameError, ValueError):
glyphID = None
if glyphID is None:
glyphID = self.last_vid -1
self.last_vid = glyphID
self.reverseVIDDict[glyphName] = glyphID
self.VIDDict[glyphID] = glyphName
return glyphID
glyphID = d[glyphName]
if glyphName != glyphOrder[glyphID]:
self._buildReverseGlyphOrderDict()
return self.getGlyphID(glyphName)
return glyphID
def getReverseGlyphMap(self, rebuild=False):
if rebuild or not hasattr(self, "_reverseGlyphOrderDict"):
self._buildReverseGlyphOrderDict()
return self._reverseGlyphOrderDict
def _buildReverseGlyphOrderDict(self):
self._reverseGlyphOrderDict = d = {}
glyphOrder = self.getGlyphOrder()
for glyphID in range(len(glyphOrder)):
d[glyphOrder[glyphID]] = glyphID
def _writeTable(self, tag, writer, done):
"""Internal helper function for self.save(). Keeps track of
inter-table dependencies.
"""
if tag in done:
return
tableClass = getTableClass(tag)
for masterTable in tableClass.dependencies:
if masterTable not in done:
if masterTable in self:
self._writeTable(masterTable, writer, done)
else:
done.append(masterTable)
tabledata = self.getTableData(tag)
if self.verbose:
debugmsg("writing '%s' table to disk" % tag)
writer[tag] = tabledata
done.append(tag)
def getTableData(self, tag):
"""Returns raw table data, whether compiled or directly read from disk.
"""
tag = Tag(tag)
if self.isLoaded(tag):
if self.verbose:
debugmsg("compiling '%s' table" % tag)
return self.tables[tag].compile(self)
elif self.reader and tag in self.reader:
if self.verbose:
debugmsg("Reading '%s' table from disk" % tag)
return self.reader[tag]
else:
raise KeyError(tag)
def getGlyphSet(self, preferCFF=True):
"""Return a generic GlyphSet, which is a dict-like object
mapping glyph names to glyph objects. The returned glyph objects
have a .draw() method that supports the Pen protocol, and will
have an attribute named 'width', but only *after* the .draw() method
has been called.
If the font is CFF-based, the outlines will be taken from the 'CFF '
table. Otherwise the outlines will be taken from the 'glyf' table.
If the font contains both a 'CFF ' and a 'glyf' table, you can use
the 'preferCFF' argument to specify which one should be taken.
"""
if preferCFF and "CFF " in self:
return list(self["CFF "].cff.values())[0].CharStrings
if "glyf" in self:
return _TTGlyphSet(self)
if "CFF " in self:
return list(self["CFF "].cff.values())[0].CharStrings
raise TTLibError("Font contains no outlines")
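# A minimal usage sketch of getGlyphSet() (the font path and glyph name are
# purely illustrative, and somePen stands for any object implementing the Pen
# protocol described in fontTools.pens.basePen):
#
#     font = TTFont("MyFont.ttf")
#     glyphset = font.getGlyphSet()
#     glyphset["A"].draw(somePen)    # outline commands are sent to somePen
#     advance = glyphset["A"].width  # advance width of the glyph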
class _TTGlyphSet(object):
"""Generic dict-like GlyphSet class, meant as a TrueType counterpart
to CFF's CharString dict. See TTFont.getGlyphSet().
"""
# This class is distinct from the 'glyf' table itself because we need
# access to the 'hmtx' table, which could cause a dependency problem
# there when reading from XML.
def __init__(self, ttFont):
self._ttFont = ttFont
def keys(self):
return list(self._ttFont["glyf"].keys())
def has_key(self, glyphName):
return glyphName in self._ttFont["glyf"]
__contains__ = has_key
def __getitem__(self, glyphName):
return _TTGlyph(glyphName, self._ttFont)
def get(self, glyphName, default=None):
try:
return self[glyphName]
except KeyError:
return default
class _TTGlyph(object):
"""Wrapper for a TrueType glyph that supports the Pen protocol, meaning
that it has a .draw() method that takes a pen object as its only
argument. Additionally there is a 'width' attribute.
"""
def __init__(self, glyphName, ttFont):
self._glyphName = glyphName
self._ttFont = ttFont
self.width, self.lsb = self._ttFont['hmtx'][self._glyphName]
def draw(self, pen):
"""Draw the glyph onto Pen. See fontTools.pens.basePen for details
how that works.
"""
glyfTable = self._ttFont['glyf']
glyph = glyfTable[self._glyphName]
if hasattr(glyph, "xMin"):
offset = self.lsb - glyph.xMin
else:
offset = 0
if glyph.isComposite():
for component in glyph:
glyphName, transform = component.getComponentInfo()
pen.addComponent(glyphName, transform)
else:
coordinates, endPts, flags = glyph.getCoordinates(glyfTable)
if offset:
coordinates = coordinates + (offset, 0)
start = 0
for end in endPts:
end = end + 1
contour = coordinates[start:end].tolist()
cFlags = flags[start:end].tolist()
start = end
if 1 not in cFlags:
# There is not a single on-curve point on the curve,
# use pen.qCurveTo's special case by specifying None
# as the on-curve point.
contour.append(None)
pen.qCurveTo(*contour)
else:
# Shuffle the points so that the contour is guaranteed
# to *end* in an on-curve point, which we'll use for
# the moveTo.
firstOnCurve = cFlags.index(1) + 1
contour = contour[firstOnCurve:] + contour[:firstOnCurve]
cFlags = cFlags[firstOnCurve:] + cFlags[:firstOnCurve]
pen.moveTo(contour[-1])
while contour:
nextOnCurve = cFlags.index(1) + 1
if nextOnCurve == 1:
pen.lineTo(contour[0])
else:
pen.qCurveTo(*contour[:nextOnCurve])
contour = contour[nextOnCurve:]
cFlags = cFlags[nextOnCurve:]
pen.closePath()
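# The pen passed to draw() above only needs the methods actually called:
# moveTo, lineTo, qCurveTo, closePath and (for composites) addComponent.
# A minimal illustrative pen that just records what it is given:
#
#     class PrintingPen(object):
#         def moveTo(self, pt): print("moveTo", pt)
#         def lineTo(self, pt): print("lineTo", pt)
#         def qCurveTo(self, *points): print("qCurveTo", points)
#         def closePath(self): print("closePath")
#         def addComponent(self, name, transform): print("addComponent", name, transform)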
class GlyphOrder(object):
"""A pseudo table. The glyph order isn't in the font as a separate
table, but it's nice to present it as such in the TTX format.
"""
def __init__(self, tag=None):
pass
def toXML(self, writer, ttFont):
glyphOrder = ttFont.getGlyphOrder()
writer.comment("The 'id' attribute is only for humans; "
"it is ignored when parsed.")
writer.newline()
for i in range(len(glyphOrder)):
glyphName = glyphOrder[i]
writer.simpletag("GlyphID", id=i, name=glyphName)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
if not hasattr(self, "glyphOrder"):
self.glyphOrder = []
ttFont.setGlyphOrder(self.glyphOrder)
if name == "GlyphID":
self.glyphOrder.append(attrs["name"])
def getTableModule(tag):
"""Fetch the packer/unpacker module for a table.
Return None when no module is found.
"""
from . import tables
pyTag = tagToIdentifier(tag)
try:
__import__("fontTools.ttLib.tables." + pyTag)
except ImportError as err:
# If pyTag is found in the ImportError message, it
# means the table is not implemented. If it's not
# there, then some other module is missing; don't
# suppress the error.
if str(err).find(pyTag) >= 0:
return None
else:
raise err
else:
return getattr(tables, pyTag)
def getTableClass(tag):
"""Fetch the packer/unpacker class for a table.
Return None when no class is found.
"""
module = getTableModule(tag)
if module is None:
from .tables.DefaultTable import DefaultTable
return DefaultTable
pyTag = tagToIdentifier(tag)
tableClass = getattr(module, "table_" + pyTag)
return tableClass
def getClassTag(klass):
"""Fetch the table tag for a class object."""
name = klass.__name__
assert name[:6] == 'table_'
name = name[6:] # Chop 'table_'
return identifierToTag(name)
def newTable(tag):
"""Return a new instance of a table."""
tableClass = getTableClass(tag)
return tableClass(tag)
def _escapechar(c):
"""Helper function for tagToIdentifier()"""
import re
if re.match("[a-z0-9]", c):
return "_" + c
elif re.match("[A-Z]", c):
return c + "_"
else:
return hex(byteord(c))[2:]
def tagToIdentifier(tag):
"""Convert a table tag to a valid (but UGLY) python identifier,
as well as a filename that's guaranteed to be unique even on a
caseless file system. Each character is mapped to two characters.
Lowercase letters get an underscore before the letter, uppercase
letters get an underscore after the letter. Trailing spaces are
trimmed. Illegal characters are escaped as two hex bytes. If the
result starts with a number (as the result of a hex escape), an
extra underscore is prepended. Examples:
'glyf' -> '_g_l_y_f'
'cvt ' -> '_c_v_t'
'OS/2' -> 'O_S_2f_2'
"""
import re
tag = Tag(tag)
if tag == "GlyphOrder":
return tag
assert len(tag) == 4, "tag should be 4 characters long"
while len(tag) > 1 and tag[-1] == ' ':
tag = tag[:-1]
ident = ""
for c in tag:
ident = ident + _escapechar(c)
if re.match("[0-9]", ident):
ident = "_" + ident
return ident
def identifierToTag(ident):
"""the opposite of tagToIdentifier()"""
if ident == "GlyphOrder":
return ident
if len(ident) % 2 and ident[0] == "_":
ident = ident[1:]
assert not (len(ident) % 2)
tag = ""
for i in range(0, len(ident), 2):
if ident[i] == "_":
tag = tag + ident[i+1]
elif ident[i+1] == "_":
tag = tag + ident[i]
else:
# assume hex
tag = tag + chr(int(ident[i:i+2], 16))
# append trailing spaces
tag = tag + (4 - len(tag)) * ' '
return Tag(tag)
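# Round-trip sketches for the two helpers above (values follow directly from
# the mapping rules documented in tagToIdentifier):
#
#     tagToIdentifier('cvt ') == '_c_v_t'   and   identifierToTag('_c_v_t') == 'cvt '
#     tagToIdentifier('OS/2') == 'O_S_2f_2' and   identifierToTag('O_S_2f_2') == 'OS/2'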
def tagToXML(tag):
"""Similarly to tagToIdentifier(), this converts a TT tag
to a valid XML element name. Since XML element names are
case sensitive, this is a fairly simple/readable translation.
"""
import re
tag = Tag(tag)
if tag == "OS/2":
return "OS_2"
elif tag == "GlyphOrder":
return tag
if re.match("[A-Za-z_][A-Za-z_0-9]* *$", tag):
return tag.strip()
else:
return tagToIdentifier(tag)
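# For example: tagToXML('OS/2') -> 'OS_2', tagToXML('cvt ') -> 'cvt' (the
# trailing space is stripped because the name is already XML-safe), and
# tagToXML('glyf') -> 'glyf'.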
def xmlToTag(tag):
"""The opposite of tagToXML()"""
if tag == "OS_2":
return Tag("OS/2")
if len(tag) == 8:
return identifierToTag(tag)
else:
return Tag(tag + " " * (4 - len(tag)))
def debugmsg(msg):
import time
print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time())))
# Table order as recommended in the OpenType specification 1.4
TTFTableOrder = ["head", "hhea", "maxp", "OS/2", "hmtx", "LTSH", "VDMX",
"hdmx", "cmap", "fpgm", "prep", "cvt ", "loca", "glyf",
"kern", "name", "post", "gasp", "PCLT"]
OTFTableOrder = ["head", "hhea", "maxp", "OS/2", "name", "cmap", "post",
"CFF "]
def sortedTagList(tagList, tableOrder=None):
"""Return a sorted copy of tagList, sorted according to the OpenType
specification, or according to a custom tableOrder. If given and not
None, tableOrder needs to be a list of tag names.
"""
tagList = sorted(tagList)
if tableOrder is None:
if "DSIG" in tagList:
# DSIG should be last (XXX spec reference?)
tagList.remove("DSIG")
tagList.append("DSIG")
if "CFF " in tagList:
tableOrder = OTFTableOrder
else:
tableOrder = TTFTableOrder
orderedTables = []
for tag in tableOrder:
if tag in tagList:
orderedTables.append(tag)
tagList.remove(tag)
orderedTables.extend(tagList)
return orderedTables
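# Illustrative call (hypothetical tag list for a TrueType-flavoured font):
#
#     sortedTagList(['glyf', 'cmap', 'head', 'loca', 'maxp', 'DSIG'])
#
# returns ['head', 'maxp', 'cmap', 'loca', 'glyf', 'DSIG']: spec order for the
# known tags, with 'DSIG' forced to the end.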
def reorderFontTables(inFile, outFile, tableOrder=None, checkChecksums=False):
"""Rewrite a font file, ordering the tables as recommended by the
OpenType specification 1.4.
"""
from fontTools.ttLib.sfnt import SFNTReader, SFNTWriter
reader = SFNTReader(inFile, checkChecksums=checkChecksums)
writer = SFNTWriter(outFile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData)
tables = list(reader.keys())
for tag in sortedTagList(tables, tableOrder):
writer[tag] = reader[tag]
writer.close()
def maxPowerOfTwo(x):
"""Return the highest exponent of two, so that
(2 ** exponent) <= x. Return 0 if x is 0.
"""
exponent = 0
while x:
x = x >> 1
exponent = exponent + 1
return max(exponent - 1, 0)
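# For example: maxPowerOfTwo(1) == 0, maxPowerOfTwo(16) == 4 and
# maxPowerOfTwo(39) == 5 (2 ** 5 == 32 is the largest power of two <= 39).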
def getSearchRange(n, itemSize):
"""Calculate searchRange, entrySelector, rangeShift.
"""
# searchRange, entrySelector and rangeShift are binary-search helper values
# that the sfnt format requires to be stored in the file (for example in the
# table directory and in the cmap format 4 subtable header).
exponent = maxPowerOfTwo(n)
searchRange = (2 ** exponent) * itemSize
entrySelector = exponent
rangeShift = max(0, n * itemSize - searchRange)
return searchRange, entrySelector, rangeShift
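# Worked example: a cmap format 4 subtable with 39 segments and an itemSize
# of 2 gives exponent 5, so searchRange = (2 ** 5) * 2 = 64, entrySelector = 5
# and rangeShift = 39 * 2 - 64 = 14.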
| apache-2.0 |
dracos/QGIS | python/plugins/processing/tools/translation.py | 12 | 3016 | # -*- coding: utf-8 -*-
"""
***************************************************************************
translation.py
---------------------
Date : July 2015
Copyright : (C) 2015 by Arnaud Morvan
Email : arnaud dot morvan at camptocamp dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Arnaud Morvan'
__date__ = 'July 2015'
__copyright__ = '(C) 2015, Arnaud Morvan'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from processing.core.Processing import Processing
from processing.gui.AlgorithmClassification import (
loadClassification, loadDisplayNames, getClassificationEn, getDisplayNameEn)
def updateTranslations():
"""Update processing.algs.translations module.
Need QGIS python API on python path, can be run from QGIS console. Example:
from processing.tools.translation import updateTranslations
updateTranslations()
"""
loadClassification()
loadDisplayNames()
f = open(os.path.join(os.path.dirname(__file__), '../algs/translations.py'), 'w')
f.write('''# -*- coding: utf-8 -*-
"""
Don't edit this file manually.
Update it from QGIS console:
from processing.tools.translation import updateTranslations
updateTranslations()
"""
from PyQt4.QtCore import QCoreApplication
def translationShadow():
''')
groups = {}
for provider in Processing.providers:
f.write('''
"""{}"""
'''.format(provider.__class__.__name__))
for alg in provider.algs:
display_name = getDisplayNameEn(alg)
f.write(" QCoreApplication.translate(\"{}\", \"{}\")\n"
.format(alg.__class__.__name__,
display_name.replace('"', '\\"')))
if not alg.group in groups:
groups[alg.group] = 'AlgorithmClassification'
group, subgroup = getClassificationEn(alg)
if group is not None and not group in groups:
groups[group] = 'AlgorithmClassification'
if subgroup is not None and not subgroup in groups:
groups[subgroup] = 'AlgorithmClassification'
f.write('''
"""Groups and subgroups"""
''')
for group, context in groups.iteritems():
f.write(" QCoreApplication.translate(\"{}\", \"{}\")\n"
.format(context,
group.replace('"', '\\"')))
| gpl-2.0 |
redhat-openstack/horizon | openstack_dashboard/dashboards/settings/password/forms.py | 63 | 3043 | # Copyright 2013 Centrin Data Systems Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.forms import ValidationError # noqa
from django import http
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import functions as utils
from horizon.utils import validators
from openstack_dashboard import api
class PasswordForm(forms.SelfHandlingForm):
current_password = forms.CharField(
label=_("Current password"),
widget=forms.PasswordInput(render_value=False))
new_password = forms.RegexField(
label=_("New password"),
widget=forms.PasswordInput(render_value=False),
regex=validators.password_validator(),
error_messages={'invalid':
validators.password_validator_msg()})
confirm_password = forms.CharField(
label=_("Confirm new password"),
widget=forms.PasswordInput(render_value=False))
no_autocomplete = True
def clean(self):
'''Check to make sure password fields match.'''
data = super(forms.Form, self).clean()
if 'new_password' in data:
if data['new_password'] != data.get('confirm_password', None):
raise ValidationError(_('Passwords do not match.'))
return data
# We have to protect the entire "data" dict because it contains the
# oldpassword and newpassword strings.
@sensitive_variables('data')
def handle(self, request, data):
user_is_editable = api.keystone.keystone_can_edit_user()
if user_is_editable:
try:
api.keystone.user_update_own_password(request,
data['current_password'],
data['new_password'])
response = http.HttpResponseRedirect(settings.LOGOUT_URL)
msg = _("Password changed. Please log in again to continue.")
utils.add_logout_reason(request, response, msg)
return response
except Exception:
exceptions.handle(request,
_('Unable to change password.'))
return False
else:
messages.error(request, _('Changing password is not supported.'))
return False
| apache-2.0 |
unclechu/avto-lux161 | avto-lux/app/adm/routes/main.py | 1 | 5434 | # -*- coding: utf-8 -*-
import os, time
import hashlib
import datetime
from warnings import warn
from .helpers import request_except_handler, require_auth
from app.configparser import config
from app.utils import get_json_localization
from app.mixins.auth import AuthMixin
from app.mixins.routes import JsonResponseMixin
from app.models.dbconnect import Session
from app.models.usermodels import User
class AdminMainRoute(JsonResponseMixin):
def get(self, *args):
lang = config('LOCALIZATION')['LANG']
localization = get_json_localization('ADMIN')[lang]
kwrgs = {
'page_title': localization['page_title'],
'lang': lang,
'local': localization,
'is_auth': 1 if self.get_secure_cookie('user') else 0,
'is_debug': 1 if self.application.settings.get('debug') else 0
}
return self.render('admin/layout.jade', **kwrgs)
class AuthHandler(AuthMixin, JsonResponseMixin):
def post(self):
if self.get_secure_cookie('user'):
return self.json_response({
'status': 'success',
'username': self.get_secure_cookie('user').decode('utf-8')
})
session = Session()
try:
usr = (
session
.query(User)
.filter_by(login=self.get_argument('user'))
.one()
)
except Exception as e:
warn('adm/AuthHandler.post(): user not found:\n%s' % e)
return self.json_response({
'status': 'error',
'error_code': 'user_not_found'
})
finally:
session.close()
compared = self.compare_password(
hpasswd=usr.password,
password=self.get_argument('pass')
)
if compared and usr.is_active:
self.set_secure_cookie('user', usr.login)
return self.json_response({
'status': 'success',
'username': usr.login
})
elif not usr.is_active:
return self.json_response({
'status': 'error',
'error_code': 'user_inactive'
})
return self.json_response({
'status': 'error',
'error_code': 'incorrect_password'
})
class LogoutHandler(JsonResponseMixin):
def post(self):
self.clear_all_cookies()
return self.json_response({'status': 'logout'})
class CreateUser(AuthMixin, JsonResponseMixin):
@require_auth
def post(self):
login = self.get_argument('login')
passwd = self.get_argument('password')
session = Session()
try:
olds = [x[0] for x in session.query(User.login).all()]
except Exception as e:
session.close()
warn('adm/CreateUser.post(): cannot get users logins:\n%s' % e)
raise e
if login == '':
return self.json_response({
'status': 'error',
'error_code': 'incorrect_data'
})
elif login in olds:
return self.json_response({
'status': 'error',
'error_code': 'unique_key_exist'
})
is_active = True
try:
self.get_argument('is_active')
except:
is_active = False
usr = User(
login=login,
password=self.create_password(passwd),
last_login=datetime.datetime.utcnow(),
is_active=is_active
)
try:
session.add(usr)
session.commit()
except Exception as e:
warn('adm/CreateUser.post(): cannot add user:\n%s' % e)
raise e
finally:
session.close()
return self.json_response({'status': 'success'})
class UpdateUser(AuthMixin, JsonResponseMixin):
@require_auth
def post(self):
kwargs = {}
passwrd = self.get_argument('password')
login = self.get_argument('login')
id = self.get_argument('id')
is_active = True
try:
self.get_argument('is_active')
except:
is_active = False
session = Session()
try:
usr = session.query(User).filter_by(id=id).one()
except Exception as e:
session.close()
warn(
'adm/UpdateUser.post(): cannot get user by #%s id:\n%s' %
(str(id), e)
)
raise e
try:
olds = [x[0] for x in session.query(User.login).all()]
except Exception as e:
session.close()
warn('adm/UpdateUser.post(): cannot get users logins:\n%s' % e)
raise e
if login == '':
return self.json_response({
'status': 'error',
'error_code': 'incorrect_data'
})
elif usr.login != login and login in olds:
return self.json_response({
'status': 'error',
'error_code': 'unique_key_exist'
})
kwargs.update({
'login': login,
'is_active': is_active
})
if passwrd != '':
kwargs.update({'password': self.create_password(passwrd)})
try:
session.query(User).filter_by(id=id).update(kwargs)
session.commit()
except Exception as e:
warn(
'adm/UpdateUser.post(): cannot update user #%s data:\n%s' %
(str(id), e)
)
raise e
finally:
session.close()
return self.json_response({'status': 'success'})
class FileUpload(JsonResponseMixin):
_extension_map = {
'application/octet-stream': '', # without extension
'image/svg+xml': '.svg',
'text/plain': '.txt'
}
@require_auth
@request_except_handler
def post(self):
file_path = config('UPLOAD_FILES_PATH')
hashes = []
for f in self.request.files.items():
_file = f[1][0]
_filename = hashlib.sha512(
str(time.time()).encode('utf-8')
).hexdigest()[0:35]
if _file['content_type'] in self._extension_map:
ext = self._extension_map[_file['content_type']]
else:
ext = '.' + _file['content_type'].split('/')[1]
fname = _filename + ext
f = open(os.path.join(file_path, fname), 'wb')
f.write(_file['body'])
f.close()
hashes.append({'name': fname})
return self.json_response({
'status': 'success',
'files': hashes
})
| agpl-3.0 |
dpetzold/django | django/contrib/gis/geos/mutable_list.py | 238 | 10705 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Released under the New BSD license.
"""
This module contains a base type which provides list-style mutations
without specific data storage methods.
See also http://static.aryehleib.com/oldsite/MutableLists.html
Author: Aryeh Leib Taurog.
"""
from functools import total_ordering
from django.utils import six
from django.utils.six.moves import range
@total_ordering
class ListMixin(object):
"""
A base class which provides complete list interface.
Derived classes must call ListMixin's __init__() function
and implement the following:
function _get_single_external(self, i):
Return single item with index i for general use.
The index i will always satisfy 0 <= i < len(self).
function _get_single_internal(self, i):
Same as above, but for use within the class [Optional]
Note that if _get_single_internal and _get_single_external return
different types of objects, _set_list must distinguish
between the two and handle each appropriately.
function _set_list(self, length, items):
Recreate the entire object.
NOTE: items may be a generator which calls _get_single_internal.
Therefore, it is necessary to cache the values in a temporary:
temp = list(items)
before clobbering the original storage.
function _set_single(self, i, value):
Set the single item at index i to value [Optional]
If left undefined, all mutations will result in rebuilding
the object using _set_list.
function __len__(self):
Return the length
int _minlength:
The minimum legal length [Optional]
int _maxlength:
The maximum legal length [Optional]
type or tuple _allowed:
A type or tuple of allowed item types [Optional]
"""
_minlength = 0
_maxlength = None
# ### Python initialization and special list interface methods ###
def __init__(self, *args, **kwargs):
if not hasattr(self, '_get_single_internal'):
self._get_single_internal = self._get_single_external
if not hasattr(self, '_set_single'):
self._set_single = self._set_single_rebuild
self._assign_extended_slice = self._assign_extended_slice_rebuild
super(ListMixin, self).__init__(*args, **kwargs)
def __getitem__(self, index):
"Get the item(s) at the specified index/slice."
if isinstance(index, slice):
return [self._get_single_external(i) for i in range(*index.indices(len(self)))]
else:
index = self._checkindex(index)
return self._get_single_external(index)
def __delitem__(self, index):
"Delete the item(s) at the specified index/slice."
if not isinstance(index, six.integer_types + (slice,)):
raise TypeError("%s is not a legal index" % index)
# calculate new length and dimensions
origLen = len(self)
if isinstance(index, six.integer_types):
index = self._checkindex(index)
indexRange = [index]
else:
indexRange = range(*index.indices(origLen))
newLen = origLen - len(indexRange)
newItems = (self._get_single_internal(i)
for i in range(origLen)
if i not in indexRange)
self._rebuild(newLen, newItems)
def __setitem__(self, index, val):
"Set the item(s) at the specified index/slice."
if isinstance(index, slice):
self._set_slice(index, val)
else:
index = self._checkindex(index)
self._check_allowed((val,))
self._set_single(index, val)
# ### Special methods for arithmetic operations ###
def __add__(self, other):
'add another list-like object'
return self.__class__(list(self) + list(other))
def __radd__(self, other):
'add to another list-like object'
return other.__class__(list(other) + list(self))
def __iadd__(self, other):
'add another list-like object to self'
self.extend(list(other))
return self
def __mul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __rmul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __imul__(self, n):
'multiply'
if n <= 0:
del self[:]
else:
cache = list(self)
for i in range(n - 1):
self.extend(cache)
return self
def __eq__(self, other):
olen = len(other)
for i in range(olen):
try:
c = self[i] == other[i]
except IndexError:
# self must be shorter
return False
if not c:
return False
return len(self) == olen
def __lt__(self, other):
olen = len(other)
for i in range(olen):
try:
c = self[i] < other[i]
except IndexError:
# self must be shorter
return True
if c:
return c
elif other[i] < self[i]:
return False
return len(self) < olen
# ### Public list interface Methods ###
# ## Non-mutating ##
def count(self, val):
"Standard list count method"
count = 0
for i in self:
if val == i:
count += 1
return count
def index(self, val):
"Standard list index method"
for i in range(0, len(self)):
if self[i] == val:
return i
raise ValueError('%s not found in object' % str(val))
# ## Mutating ##
def append(self, val):
"Standard list append method"
self[len(self):] = [val]
def extend(self, vals):
"Standard list extend method"
self[len(self):] = vals
def insert(self, index, val):
"Standard list insert method"
if not isinstance(index, six.integer_types):
raise TypeError("%s is not a legal index" % index)
self[index:index] = [val]
def pop(self, index=-1):
"Standard list pop method"
result = self[index]
del self[index]
return result
def remove(self, val):
"Standard list remove method"
del self[self.index(val)]
def reverse(self):
"Standard list reverse method"
self[:] = self[-1::-1]
def sort(self, cmp=None, key=None, reverse=False):
"Standard list sort method"
if key:
temp = [(key(v), v) for v in self]
temp.sort(key=lambda x: x[0], reverse=reverse)
self[:] = [v[1] for v in temp]
else:
temp = list(self)
if cmp is not None:
temp.sort(cmp=cmp, reverse=reverse)
else:
temp.sort(reverse=reverse)
self[:] = temp
# ### Private routines ###
def _rebuild(self, newLen, newItems):
if newLen < self._minlength:
raise ValueError('Must have at least %d items' % self._minlength)
if self._maxlength is not None and newLen > self._maxlength:
raise ValueError('Cannot have more than %d items' % self._maxlength)
self._set_list(newLen, newItems)
def _set_single_rebuild(self, index, value):
self._set_slice(slice(index, index + 1, 1), [value])
def _checkindex(self, index, correct=True):
length = len(self)
if 0 <= index < length:
return index
if correct and -length <= index < 0:
return index + length
raise IndexError('invalid index: %s' % str(index))
def _check_allowed(self, items):
if hasattr(self, '_allowed'):
if False in [isinstance(val, self._allowed) for val in items]:
raise TypeError('Invalid type encountered in the arguments.')
def _set_slice(self, index, values):
"Assign values to a slice of the object"
try:
iter(values)
except TypeError:
raise TypeError('can only assign an iterable to a slice')
self._check_allowed(values)
origLen = len(self)
valueList = list(values)
start, stop, step = index.indices(origLen)
# CAREFUL: index.step and step are not the same!
# step will never be None
if index.step is None:
self._assign_simple_slice(start, stop, valueList)
else:
self._assign_extended_slice(start, stop, step, valueList)
def _assign_extended_slice_rebuild(self, start, stop, step, valueList):
'Assign an extended slice by rebuilding entire list'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
# we're not changing the length of the sequence
newLen = len(self)
newVals = dict(zip(indexList, valueList))
def newItems():
for i in range(newLen):
if i in newVals:
yield newVals[i]
else:
yield self._get_single_internal(i)
self._rebuild(newLen, newItems())
def _assign_extended_slice(self, start, stop, step, valueList):
'Assign an extended slice by re-assigning individual items'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
for i, val in zip(indexList, valueList):
self._set_single(i, val)
def _assign_simple_slice(self, start, stop, valueList):
'Assign a simple slice; Can assign slice of any length'
origLen = len(self)
stop = max(start, stop)
newLen = origLen - stop + start + len(valueList)
def newItems():
for i in range(origLen + 1):
if i == start:
for val in valueList:
yield val
if i < origLen:
if i < start or i >= stop:
yield self._get_single_internal(i)
self._rebuild(newLen, newItems())
| bsd-3-clause |
tbabej/astropy | astropy/visualization/wcsaxes/tests/test_frame.py | 2 | 5298 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import matplotlib.pyplot as plt
from ....wcs import WCS
from ....tests.helper import pytest, remote_data
from .. import WCSAxes
from ..frame import BaseFrame
from ....tests.image_tests import IMAGE_REFERENCE_DIR
from .test_images import BaseImageTests
class HexagonalFrame(BaseFrame):
spine_names = 'abcdef'
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
ymid = 0.5 * (ymin + ymax)
xmid1 = (xmin + xmax) / 4.
xmid2 = (xmin + xmax) * 3. / 4.
self['a'].data = np.array(([xmid1, ymin], [xmid2, ymin]))
self['b'].data = np.array(([xmid2, ymin], [xmax, ymid]))
self['c'].data = np.array(([xmax, ymid], [xmid2, ymax]))
self['d'].data = np.array(([xmid2, ymax], [xmid1, ymax]))
self['e'].data = np.array(([xmid1, ymax], [xmin, ymid]))
self['f'].data = np.array(([xmin, ymid], [xmid1, ymin]))
class TestFrame(BaseImageTests):
@remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='custom_frame.png',
tolerance=1.5)
def test_custom_frame(self):
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7],
wcs=wcs,
frame_class=HexagonalFrame)
fig.add_axes(ax)
ax.coords.grid(color='white')
im = ax.imshow(np.ones((149, 149)), vmin=0., vmax=2.,
origin='lower', cmap=plt.cm.gist_heat)
minpad = {}
minpad['a'] = minpad['d'] = 1
minpad['b'] = minpad['c'] = minpad['e'] = minpad['f'] = 2.75
ax.coords['glon'].set_axislabel("Longitude", minpad=minpad)
ax.coords['glon'].set_axislabel_position('ad')
ax.coords['glat'].set_axislabel("Latitude", minpad=minpad)
ax.coords['glat'].set_axislabel_position('bcef')
ax.coords['glon'].set_ticklabel_position('ad')
ax.coords['glat'].set_ticklabel_position('bcef')
# Set limits so that no labels overlap
ax.set_xlim(5.5, 100.5)
ax.set_ylim(5.5, 110.5)
# Clip the image to the frame
im.set_clip_path(ax.coords.frame.patch)
return fig
@remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='update_clip_path_rectangular.png',
tolerance=1.5)
def test_update_clip_path_rectangular(self, tmpdir):
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
fig.add_axes(ax)
ax.set_xlim(0., 2.)
ax.set_ylim(0., 2.)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmpdir.join('nothing').strpath)
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
return fig
@remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='update_clip_path_nonrectangular.png',
tolerance=1.5)
def test_update_clip_path_nonrectangular(self, tmpdir):
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal',
frame_class=HexagonalFrame)
fig.add_axes(ax)
ax.set_xlim(0., 2.)
ax.set_ylim(0., 2.)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmpdir.join('nothing').strpath)
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
return fig
@remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='update_clip_path_change_wcs.png',
tolerance=1.5)
def test_update_clip_path_change_wcs(self, tmpdir):
# When WCS is changed, a new frame is created, so we need to make sure
# that the path is carried over to the new frame.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
fig.add_axes(ax)
ax.set_xlim(0., 2.)
ax.set_ylim(0., 2.)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmpdir.join('nothing').strpath)
ax.reset_wcs()
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
return fig
def test_copy_frame_properties_change_wcs(self):
# When WCS is changed, a new frame is created, so we need to make sure
# that the color and linewidth are transferred over
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
ax.coords.frame.set_linewidth(5)
ax.coords.frame.set_color('purple')
ax.reset_wcs()
assert ax.coords.frame.get_linewidth() == 5
assert ax.coords.frame.get_color() == 'purple'
| bsd-3-clause |
krzysztof/invenio-pidrelations | invenio_pidrelations/contrib/versioning.py | 1 | 6972 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""API for PID relations concepts."""
from __future__ import absolute_import, print_function
from flask import Blueprint
from invenio_db import db
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from ..api import PIDConceptOrdered
from ..models import PIDRelation
from ..utils import resolve_relation_type_config
class PIDVersioning(PIDConceptOrdered):
"""API for PID versioning relations.
- Adds automatic redirection handling for Parent-LastChild
- Sets stricter method signatures, e.g.: 'index' is a mandatory parameter
when calling 'insert'.
"""
def __init__(self, child=None, parent=None, draft_deposit=None,
draft_record=None, relation=None):
"""Create a PID versioning API."""
self.relation_type = resolve_relation_type_config('version').id
if relation is not None:
if relation.relation_type != self.relation_type:
raise ValueError("Provided PID relation ({0}) is not a "
"version relation.".format(relation))
super(PIDVersioning, self).__init__(relation=relation)
else:
super(PIDVersioning, self).__init__(
child=child, parent=parent, relation_type=self.relation_type,
relation=relation)
if self.child:
self.relation = PIDRelation.query.filter(
PIDRelation.child_id == self.child.id,
PIDRelation.relation_type == self.relation_type,
).one_or_none()
def insert_child(self, child, index=-1):
"""Insert child into versioning scheme.
The 'index' parameter has to be an integer.
"""
if index is None:
raise ValueError(
"Incorrect value for child index: {0}".format(index))
with db.session.begin_nested():
super(PIDVersioning, self).insert_child(child, index=index)
self.parent.redirect(child)
def remove_child(self, child):
"""Remove a child from a versioning scheme.
Extends the base method by always reordering after removal and by
redirecting the parent to the last remaining child.
"""
with db.session.begin_nested():
super(PIDVersioning, self).remove_child(child, reorder=True)
if self.last_child is not None:
self.parent.redirect(self.last_child)
def create_parent(self, pid_value, status=PIDStatus.REGISTERED,
redirect=True):
"""Create a parent PID from a child and create a new PID versioning."""
if self.has_parents:
raise Exception("Parent already exists for this child.")
self.parent = PersistentIdentifier.create(
self.child.pid_type, pid_value,
object_type=self.child.object_type,
status=status)
self.relation = PIDRelation.create(
self.parent, self.child, self.relation_type, 0)
if redirect:
self.parent.redirect(self.child)
@property
def exists(self):
"""Check if the PID Versioning exists."""
return self.parent is not None
@property
def last_child(self):
"""
Get the latest PID, as pointed to by the Head PID.
If the 'pid' is a Head PID, return the latest of its children.
If the 'pid' is a Version PID, return the latest of its siblings.
Return None for the non-versioned PIDs.
"""
return self.get_children(ordered=False,
pid_status=PIDStatus.REGISTERED).filter(
PIDRelation.index.isnot(None)).order_by(
PIDRelation.index.desc()).first()
@property
def draft_child(self):
"""Get the last non-registered child."""
return self.get_children(ordered=False).filter(
PIDRelation.index.isnot(None),
PersistentIdentifier.status == PIDStatus.RESERVED).order_by(
PIDRelation.index.desc()).one_or_none()
@property
def draft_child_deposit(self):
"""
Get the deposit of the draft child.
Return `None` if no new-version deposit exists.
"""
from invenio_pidrelations.contrib.records import RecordDraft
if self.draft_child:
return RecordDraft.get_draft(self.draft_child)
else:
return None
def insert_draft_child(self, child):
"""Insert a draft child to versioning."""
if not self.draft_child:
with db.session.begin_nested():
super(PIDVersioning, self).insert_child(child, index=-1)
else:
raise Exception(
"Draft child already exists for this relation: {0}".format(
self.draft_child))
def remove_draft_child(self):
"""Remove the draft child from versioning."""
if self.draft_child:
with db.session.begin_nested():
super(PIDVersioning, self).remove_child(self.draft_child,
reorder=True)
def update_redirect(self):
"""Update the parent redirect to the current last child."""
if self.last_child:
if self.parent.status == PIDStatus.RESERVED:
self.parent.register()
self.parent.redirect(self.last_child)
@property
def children(self):
"""Children of the parent."""
return self.get_children(pid_status=PIDStatus.REGISTERED, ordered=True)
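# Rough usage sketch (record_pid and new_version_pid stand for existing
# PersistentIdentifier instances; the parent pid value is made up):
#
#     versioning = PIDVersioning(child=record_pid)
#     if not versioning.exists:
#         versioning.create_parent('parent-pid-value')
#     versioning.insert_child(new_version_pid, index=-1)  # also redirects the parent
#     latest = versioning.last_child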
versioning_blueprint = Blueprint(
'invenio_pidrelations.versioning',
__name__,
template_folder='templates'
)
@versioning_blueprint.app_template_filter()
def to_versioning_api(pid, child=True):
"""Get PIDVersioning object."""
return PIDVersioning(
child=pid if child else None,
parent=pid if not child else None
)
__all__ = (
'PIDVersioning',
'versioning_blueprint'
)
| gpl-2.0 |
CasparLi/calibre | src/calibre/ebooks/compression/tcr.py | 24 | 5143 | # -*- coding: utf-8 -*-
__license__ = 'GPL 3'
__copyright__ = '2009, John Schember <[email protected]>'
__docformat__ = 'restructuredtext en'
import re
class TCRCompressor(object):
'''
TCR compression takes the form header+code_dict+coded_text.
The header is always "!!8-Bit!!". The code dict is a list of 256 strings.
Each entry takes the form of a 1 byte length followed by a string. Each position
in the list corresponds to a code found in the file. The coded text is a
string of character values. For instance, the character Q represents the
value 81, which corresponds to the string in the code list at position 81.
'''
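# Concrete illustration of the layout described above: if entry 81 of the
# code dict is the four character string "the " (stored as "\x04the "), then
# every byte with value 81 -- i.e. the character 'Q' -- in the coded text
# expands to "the " on decompression.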
def _reset(self):
# List of indexes in the codes list that are empty and can hold new codes
self.unused_codes = set()
self.coded_txt = ''
# Generate initial codes from text.
# The index of the list will be the code that represents the characters at that location
# in the list
self.codes = []
def _combine_codes(self):
'''
Combine two codes that always appear as a pair into a single code.
The intent is to create more unused codes.
'''
possible_codes = []
a_code = set(re.findall('(?msu).', self.coded_txt))
for code in a_code:
single_code = set(re.findall('(?msu)%s.' % re.escape(code), self.coded_txt))
if len(single_code) == 1:
possible_codes.append(single_code.pop())
for code in possible_codes:
self.coded_txt = self.coded_txt.replace(code, code[0])
self.codes[ord(code[0])] = '%s%s' % (self.codes[ord(code[0])], self.codes[ord(code[1])])
def _free_unused_codes(self):
'''
Look for codes that do not appear in the coded text and add them to
the list of free codes.
'''
for i in xrange(256):
if i not in self.unused_codes:
if chr(i) not in self.coded_txt:
self.unused_codes.add(i)
def _new_codes(self):
'''
Create new codes from codes that occur in pairs often.
'''
possible_new_codes = list(set(re.findall('(?msu)..', self.coded_txt)))
new_codes_count = []
for c in possible_new_codes:
count = self.coded_txt.count(c)
# Less than 3 occurrences will not produce any size reduction.
if count > 2:
new_codes_count.append((c, count))
# Arrange the codes in order of least to most occurring.
possible_new_codes = [x[0] for x in sorted(new_codes_count, key=lambda c: c[1])]
return possible_new_codes
def compress(self, txt):
self._reset()
self.codes = list(set(re.findall('(?msu).', txt)))
# Replace the text with their corresponding code
for c in txt:
self.coded_txt += chr(self.codes.index(c))
# Zero the unused codes and record which are unused.
for i in range(len(self.codes), 256):
self.codes.append('')
self.unused_codes.add(i)
self._combine_codes()
possible_codes = self._new_codes()
while possible_codes and self.unused_codes:
while possible_codes and self.unused_codes:
unused_code = self.unused_codes.pop()
# Take the last possible code and split it into its two individual
# characters. The last possible code is the most often occurring.
code1, code2 = possible_codes.pop()
self.codes[unused_code] = '%s%s' % (self.codes[ord(code1)], self.codes[ord(code2)])
self.coded_txt = self.coded_txt.replace('%s%s' % (code1, code2), chr(unused_code))
self._combine_codes()
self._free_unused_codes()
possible_codes = self._new_codes()
self._free_unused_codes()
# Generate the code dictionary.
code_dict = []
for i in xrange(0, 256):
if i in self.unused_codes:
code_dict.append(chr(0))
else:
code_dict.append(chr(len(self.codes[i])) + self.codes[i])
# Join the identifier with the dictionary and coded text.
return '!!8-Bit!!'+''.join(code_dict)+self.coded_txt
def decompress(stream):
txt = []
stream.seek(0)
if stream.read(9) != '!!8-Bit!!':
raise ValueError('File %s contains an invalid TCR header.' % stream.name)
# Codes that the file contents are broken down into.
entries = []
for i in xrange(256):
entry_len = ord(stream.read(1))
entries.append(stream.read(entry_len))
# Map the values in the file to locations in the string list.
entry_loc = stream.read(1)
while entry_loc != '': # EOF
txt.append(entries[ord(entry_loc)])
entry_loc = stream.read(1)
return ''.join(txt)
def compress(txt):
t = TCRCompressor()
return t.compress(txt)
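# Round-trip sketch (io.BytesIO stands in for an opened .tcr file; this module
# works on byte strings, as the surrounding code is Python 2):
#
#     import io
#     data = compress('the quick brown fox jumps over the lazy dog ' * 20)
#     text = decompress(io.BytesIO(data))
#     assert text == 'the quick brown fox jumps over the lazy dog ' * 20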
| gpl-3.0 |
ruslanloman/nova | nova/tests/unit/api/openstack/compute/contrib/test_hosts.py | 25 | 18124 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import webob.exc
from nova.api.openstack.compute.contrib import hosts as os_hosts_v2
from nova.api.openstack.compute.plugins.v3 import hosts as os_hosts_v21
from nova.compute import power_state
from nova.compute import vm_states
from nova import context as context_maker
from nova import db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_hosts
def stub_service_get_all(context, disabled=None):
return fake_hosts.SERVICES_LIST
def stub_service_get_by_host_and_binary(context, host_name, binary):
for service in stub_service_get_all(context):
if service['host'] == host_name and service['binary'] == binary:
return service
def stub_set_host_enabled(context, host_name, enabled):
"""Simulates three possible behaviours for VM drivers or compute
drivers when enabling or disabling a host.
'enabled' means new instances can go to this host
'disabled' means they can't
"""
results = {True: "enabled", False: "disabled"}
if host_name == "notimplemented":
# The vm driver for this host doesn't support this feature
raise NotImplementedError()
elif host_name == "dummydest":
# The host does not exist
raise exception.ComputeHostNotFound(host=host_name)
elif host_name == "service_not_available":
# The service is not available
raise exception.ComputeServiceUnavailable(host=host_name)
elif host_name == "host_c2":
# Simulate a failure
return results[not enabled]
else:
# Do the right thing
return results[enabled]
def stub_set_host_maintenance(context, host_name, mode):
# We'll simulate success and failure by assuming
# that 'host_c1' always succeeds, and 'host_c2'
# always fails
results = {True: "on_maintenance", False: "off_maintenance"}
if host_name == "notimplemented":
# The vm driver for this host doesn't support this feature
raise NotImplementedError()
elif host_name == "dummydest":
# The host does not exist
raise exception.ComputeHostNotFound(host=host_name)
elif host_name == "service_not_available":
# The service is not available
raise exception.ComputeServiceUnavailable(host=host_name)
elif host_name == "host_c2":
# Simulate a failure
return results[not mode]
else:
# Do the right thing
return results[mode]
def stub_host_power_action(context, host_name, action):
if host_name == "notimplemented":
raise NotImplementedError()
elif host_name == "dummydest":
# The host does not exist
raise exception.ComputeHostNotFound(host=host_name)
elif host_name == "service_not_available":
# The service is not available
raise exception.ComputeServiceUnavailable(host=host_name)
return action
def _create_instance(**kwargs):
"""Create a test instance."""
ctxt = context_maker.get_admin_context()
return db.instance_create(ctxt, _create_instance_dict(**kwargs))
def _create_instance_dict(**kwargs):
"""Create a dictionary for a test instance."""
inst = {}
inst['image_ref'] = 'cedef40a-ed67-4d10-800e-17455edce175'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = kwargs.get('user_id', 'admin')
inst['project_id'] = kwargs.get('project_id', 'fake')
inst['instance_type_id'] = '1'
if 'host' in kwargs:
inst['host'] = kwargs.get('host')
inst['vcpus'] = kwargs.get('vcpus', 1)
inst['memory_mb'] = kwargs.get('memory_mb', 20)
inst['root_gb'] = kwargs.get('root_gb', 30)
inst['ephemeral_gb'] = kwargs.get('ephemeral_gb', 30)
inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
inst['task_state'] = kwargs.get('task_state', None)
inst['availability_zone'] = kwargs.get('availability_zone', None)
inst['ami_launch_index'] = 0
inst['launched_on'] = kwargs.get('launched_on', 'dummy')
return inst
class FakeRequestWithNovaZone(object):
environ = {"nova.context": context_maker.get_admin_context()}
GET = {"zone": "nova"}
class HostTestCaseV21(test.TestCase):
"""Test Case for hosts."""
validation_ex = exception.ValidationError
Controller = os_hosts_v21.HostController
policy_ex = exception.PolicyNotAuthorized
def _setup_stubs(self):
# Pretend we have fake_hosts.HOST_LIST in the DB
self.stubs.Set(db, 'service_get_all',
stub_service_get_all)
# Only hosts in our fake DB exist
self.stubs.Set(db, 'service_get_by_host_and_binary',
stub_service_get_by_host_and_binary)
# 'host_c1' always succeeds, and 'host_c2'
self.stubs.Set(self.hosts_api, 'set_host_enabled',
stub_set_host_enabled)
# 'host_c1' always succeeds, and 'host_c2'
self.stubs.Set(self.hosts_api, 'set_host_maintenance',
stub_set_host_maintenance)
self.stubs.Set(self.hosts_api, 'host_power_action',
stub_host_power_action)
def setUp(self):
super(HostTestCaseV21, self).setUp()
self.controller = self.Controller()
self.hosts_api = self.controller.api
self.req = fakes.HTTPRequest.blank('', use_admin_context=True)
self._setup_stubs()
def _test_host_update(self, host, key, val, expected_value):
body = {key: val}
result = self.controller.update(self.req, host, body=body)
self.assertEqual(result[key], expected_value)
def test_list_hosts(self):
"""Verify that the compute hosts are returned."""
result = self.controller.index(self.req)
self.assertIn('hosts', result)
hosts = result['hosts']
self.assertEqual(fake_hosts.HOST_LIST, hosts)
def test_disable_host(self):
self._test_host_update('host_c1', 'status', 'disable', 'disabled')
self._test_host_update('host_c2', 'status', 'disable', 'enabled')
def test_enable_host(self):
self._test_host_update('host_c1', 'status', 'enable', 'enabled')
self._test_host_update('host_c2', 'status', 'enable', 'disabled')
def test_enable_maintenance(self):
self._test_host_update('host_c1', 'maintenance_mode',
'enable', 'on_maintenance')
def test_disable_maintenance(self):
self._test_host_update('host_c1', 'maintenance_mode',
'disable', 'off_maintenance')
def _test_host_update_notimpl(self, key, val):
def stub_service_get_all_notimpl(self, req):
return [{'host': 'notimplemented', 'topic': None,
'availability_zone': None}]
self.stubs.Set(db, 'service_get_all',
stub_service_get_all_notimpl)
body = {key: val}
self.assertRaises(webob.exc.HTTPNotImplemented,
self.controller.update,
self.req, 'notimplemented', body=body)
def test_disable_host_notimpl(self):
self._test_host_update_notimpl('status', 'disable')
def test_enable_maintenance_notimpl(self):
self._test_host_update_notimpl('maintenance_mode', 'enable')
def test_host_startup(self):
result = self.controller.startup(self.req, "host_c1")
self.assertEqual(result["power_action"], "startup")
def test_host_shutdown(self):
result = self.controller.shutdown(self.req, "host_c1")
self.assertEqual(result["power_action"], "shutdown")
def test_host_reboot(self):
result = self.controller.reboot(self.req, "host_c1")
self.assertEqual(result["power_action"], "reboot")
def _test_host_power_action_notimpl(self, method):
self.assertRaises(webob.exc.HTTPNotImplemented,
method, self.req, "notimplemented")
def test_host_startup_notimpl(self):
self._test_host_power_action_notimpl(self.controller.startup)
def test_host_shutdown_notimpl(self):
self._test_host_power_action_notimpl(self.controller.shutdown)
def test_host_reboot_notimpl(self):
self._test_host_power_action_notimpl(self.controller.reboot)
def test_host_status_bad_host(self):
# A host given as an argument does not exist.
self.req.environ["nova.context"].is_admin = True
dest = 'dummydest'
with testtools.ExpectedException(webob.exc.HTTPNotFound,
".*%s.*" % dest):
self.controller.update(self.req, dest, body={'status': 'enable'})
def test_host_maintenance_bad_host(self):
# A host given as an argument does not exist.
self.req.environ["nova.context"].is_admin = True
dest = 'dummydest'
with testtools.ExpectedException(webob.exc.HTTPNotFound,
".*%s.*" % dest):
self.controller.update(self.req, dest,
body={'maintenance_mode': 'enable'})
def test_host_power_action_bad_host(self):
# A host given as an argument does not exist.
self.req.environ["nova.context"].is_admin = True
dest = 'dummydest'
with testtools.ExpectedException(webob.exc.HTTPNotFound,
".*%s.*" % dest):
self.controller.reboot(self.req, dest)
def test_host_status_bad_status(self):
# A host given as an argument does not exist.
self.req.environ["nova.context"].is_admin = True
dest = 'service_not_available'
with testtools.ExpectedException(webob.exc.HTTPBadRequest,
".*%s.*" % dest):
self.controller.update(self.req, dest, body={'status': 'enable'})
def test_host_maintenance_bad_status(self):
# A host given as an argument does not exist.
self.req.environ["nova.context"].is_admin = True
dest = 'service_not_available'
with testtools.ExpectedException(webob.exc.HTTPBadRequest,
".*%s.*" % dest):
self.controller.update(self.req, dest,
body={'maintenance_mode': 'enable'})
def test_host_power_action_bad_status(self):
# A host given as an argument does not exist.
self.req.environ["nova.context"].is_admin = True
dest = 'service_not_available'
with testtools.ExpectedException(webob.exc.HTTPBadRequest,
".*%s.*" % dest):
self.controller.reboot(self.req, dest)
def test_bad_status_value(self):
bad_body = {"status": "bad"}
self.assertRaises(self.validation_ex, self.controller.update,
self.req, "host_c1", body=bad_body)
bad_body2 = {"status": "disablabc"}
self.assertRaises(self.validation_ex, self.controller.update,
self.req, "host_c1", body=bad_body2)
def test_bad_update_key(self):
bad_body = {"crazy": "bad"}
self.assertRaises(self.validation_ex, self.controller.update,
self.req, "host_c1", body=bad_body)
def test_bad_update_key_and_correct_update_key(self):
bad_body = {"status": "disable", "crazy": "bad"}
self.assertRaises(self.validation_ex, self.controller.update,
self.req, "host_c1", body=bad_body)
def test_good_update_keys(self):
body = {"status": "disable", "maintenance_mode": "enable"}
result = self.controller.update(self.req, 'host_c1', body=body)
self.assertEqual(result["host"], "host_c1")
self.assertEqual(result["status"], "disabled")
self.assertEqual(result["maintenance_mode"], "on_maintenance")
def test_show_host_not_exist(self):
# A host given as an argument does not exist.
self.req.environ["nova.context"].is_admin = True
dest = 'dummydest'
with testtools.ExpectedException(webob.exc.HTTPNotFound,
".*%s.*" % dest):
self.controller.show(self.req, dest)
def _create_compute_service(self):
"""Create compute-manager(ComputeNode and Service record)."""
ctxt = self.req.environ["nova.context"]
dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
'report_count': 0}
s_ref = db.service_create(ctxt, dic)
dic = {'service_id': s_ref['id'],
'host': s_ref['host'],
'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
'cpu_info': '', 'stats': ''}
db.compute_node_create(ctxt, dic)
return db.service_get(ctxt, s_ref['id'])
def test_show_no_project(self):
"""No instances are running on the given host."""
ctxt = context_maker.get_admin_context()
s_ref = self._create_compute_service()
result = self.controller.show(self.req, s_ref['host'])
proj = ['(total)', '(used_now)', '(used_max)']
column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
self.assertEqual(len(result['host']), 3)
for resource in result['host']:
self.assertIn(resource['resource']['project'], proj)
self.assertEqual(len(resource['resource']), 5)
self.assertEqual(set(column), set(resource['resource'].keys()))
db.service_destroy(ctxt, s_ref['id'])
def test_show_works_correctly(self):
"""show() works correctly as expected."""
ctxt = context_maker.get_admin_context()
s_ref = self._create_compute_service()
i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
i_ref2 = _create_instance(project_id='p-02', vcpus=3,
host=s_ref['host'])
result = self.controller.show(self.req, s_ref['host'])
proj = ['(total)', '(used_now)', '(used_max)', 'p-01', 'p-02']
column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
self.assertEqual(len(result['host']), 5)
for resource in result['host']:
self.assertIn(resource['resource']['project'], proj)
self.assertEqual(len(resource['resource']), 5)
self.assertEqual(set(column), set(resource['resource'].keys()))
db.service_destroy(ctxt, s_ref['id'])
db.instance_destroy(ctxt, i_ref1['uuid'])
db.instance_destroy(ctxt, i_ref2['uuid'])
def test_list_hosts_with_zone(self):
result = self.controller.index(FakeRequestWithNovaZone())
self.assertIn('hosts', result)
hosts = result['hosts']
self.assertEqual(fake_hosts.HOST_LIST_NOVA_ZONE, hosts)
class HostTestCaseV20(HostTestCaseV21):
validation_ex = webob.exc.HTTPBadRequest
policy_ex = webob.exc.HTTPForbidden
Controller = os_hosts_v2.HostController
def test_list_hosts_with_non_admin(self):
self.assertRaises(exception.AdminRequired,
self.controller.index, fakes.HTTPRequest.blank(''))
def test_host_maintenance_with_non_admin(self):
self.assertRaises(exception.AdminRequired,
self.controller.update, fakes.HTTPRequest.blank(''),
'host_c1', {'maintenance_mode': 'enable'})
def test_startup_with_non_admin(self):
self.assertRaises(exception.AdminRequired,
self.controller.startup, fakes.HTTPRequest.blank(''),
'host_c1')
def test_reboot_with_non_admin(self):
self.assertRaises(exception.AdminRequired,
self.controller.reboot, fakes.HTTPRequest.blank(''),
'host_c1')
def test_shutdown_with_non_admin(self):
self.assertRaises(exception.AdminRequired,
self.controller.shutdown,
fakes.HTTPRequest.blank(''),
'host_c1')
def test_show_non_admin(self):
self.assertRaises(exception.AdminRequired,
self.controller.show,
fakes.HTTPRequest.blank(''),
1)
class HostsPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(HostsPolicyEnforcementV21, self).setUp()
self.controller = os_hosts_v21.HostController()
self.req = fakes.HTTPRequest.blank('')
def test_index_policy_failed(self):
rule_name = "os_compute_api:os-hosts"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.index, self.req)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_show_policy_failed(self):
rule_name = "os_compute_api:os-hosts"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.show, self.req, 1)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
| apache-2.0 |
eusi/MissionPlanerHM | Lib/site-packages/numpy/oldnumeric/typeconv.py | 101 | 1622 | __all__ = ['oldtype2dtype', 'convtypecode', 'convtypecode2', 'oldtypecodes']
import numpy as np
oldtype2dtype = {'1': np.dtype(np.byte),
's': np.dtype(np.short),
# 'i': np.dtype(np.intc),
# 'l': np.dtype(int),
# 'b': np.dtype(np.ubyte),
'w': np.dtype(np.ushort),
'u': np.dtype(np.uintc),
# 'f': np.dtype(np.single),
# 'd': np.dtype(float),
# 'F': np.dtype(np.csingle),
# 'D': np.dtype(complex),
# 'O': np.dtype(object),
# 'c': np.dtype('c'),
None: np.dtype(int)
}
# converts typecode=None to int
def convtypecode(typecode, dtype=None):
if dtype is None:
try:
return oldtype2dtype[typecode]
except:
return np.dtype(typecode)
else:
return dtype
# if both typecode and dtype are None, return None
def convtypecode2(typecode, dtype=None):
if dtype is None:
if typecode is None:
return None
else:
try:
return oldtype2dtype[typecode]
except:
return np.dtype(typecode)
else:
return dtype
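# Illustrative sketch (not part of the original module): how the two helpers
# above behave for a few representative inputs, given the oldtype2dtype table.
#   convtypecode('1')    -> np.dtype(np.byte)   # found in the oldtype2dtype table
#   convtypecode('f')    -> np.dtype('f')       # not in the table, falls back to np.dtype
#   convtypecode(None)   -> np.dtype(int)       # typecode None maps to int
#   convtypecode2(None)  -> None                # both typecode and dtype are None
#   convtypecode2('s', dtype=np.dtype('i4')) -> np.dtype('i4')  # an explicit dtype wins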
_changedtypes = {'B': 'b',
'b': '1',
'h': 's',
'H': 'w',
'I': 'u'}
class _oldtypecodes(dict):
def __getitem__(self, obj):
char = np.dtype(obj).char
try:
return _changedtypes[char]
except KeyError:
return char
oldtypecodes = _oldtypecodes()
| gpl-3.0 |
jaredkipe/mongo-connector | mongo_connector/doc_managers/formatters.py | 1 | 5486 | import base64
import datetime
import re
from uuid import UUID
from math import isnan, isinf
import logging
LOG = logging.getLogger(__name__)
import bson
import bson.json_util
from mongo_connector.compat import PY3
if PY3:
long = int
unicode = str
RE_TYPE = type(re.compile(""))
try:
from bson.regex import Regex
RE_TYPES = (RE_TYPE, Regex)
except ImportError:
RE_TYPES = (RE_TYPE,)
class DocumentFormatter(object):
"""Interface for classes that can transform documents to conform to
external drivers' expectations.
"""
def transform_value(self, value):
"""Transform a leaf-node in a document.
This method may be overridden to provide custom handling for specific
types of values.
"""
raise NotImplementedError
def transform_element(self, key, value):
"""Transform a single key-value pair within a document.
This method may be overridden to provide custom handling for specific
types of values. This method should return an iterator over the
resulting key-value pairs.
"""
raise NotImplementedError
def format_document(self, document):
"""Format a document in preparation to be sent to an external driver."""
raise NotImplementedError
class DefaultDocumentFormatter(DocumentFormatter):
"""Basic DocumentFormatter that preserves numbers, base64-encodes binary,
and stringifies everything else.
"""
def transform_value(self, value):
        # This is largely taken from bson.json_util.default, though not identical,
        # so that we don't modify the structure of the document
if isinstance(value, dict):
return self.format_document(value)
elif isinstance(value, list):
return [self.transform_value(v) for v in value]
if isinstance(value, RE_TYPES):
flags = ""
if value.flags & re.IGNORECASE:
flags += "i"
if value.flags & re.LOCALE:
flags += "l"
if value.flags & re.MULTILINE:
flags += "m"
if value.flags & re.DOTALL:
flags += "s"
if value.flags & re.UNICODE:
flags += "u"
if value.flags & re.VERBOSE:
flags += "x"
pattern = value.pattern
# quasi-JavaScript notation (may include non-standard flags)
return '/%s/%s' % (pattern, flags)
elif (isinstance(value, bson.Binary) or
(PY3 and isinstance(value, bytes))):
# Just include body of binary data without subtype
return base64.b64encode(value).decode()
elif isinstance(value, UUID):
return value.hex
elif isinstance(value, (int, long, float)):
if isnan(value):
raise ValueError("nan")
elif isinf(value):
raise ValueError("inf")
return value
elif isinstance(value, datetime.datetime):
return value
elif value is None:
return value
# Default
return unicode(value)
def transform_element(self, key, value):
try:
new_value = self.transform_value(value)
yield key, new_value
except ValueError as e:
LOG.warn("Invalid value for key: {} as {}".format(key, e))
def format_document(self, document):
def _kernel(doc):
for key in doc:
value = doc[key]
for new_k, new_v in self.transform_element(key, value):
yield new_k, new_v
return dict(_kernel(document))
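# Illustrative usage sketch (not part of the original module); the document
# shown is hypothetical:
#   fmt = DefaultDocumentFormatter()
#   fmt.format_document({"n": 3, "id": UUID("12345678123456781234567812345678"),
#                        "tags": ["a", 1]})
#   # -> {"n": 3, "id": "12345678123456781234567812345678", "tags": ["a", 1]}
# Numbers pass through unchanged, a UUID becomes its hex form, and list items
# are transformed one by one; values that raise (NaN, infinity) are dropped by
# transform_element with a warning.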
class DocumentFlattener(DefaultDocumentFormatter):
"""Formatter that completely flattens documents and unwinds arrays:
An example:
{"a": 2,
"b": {
"c": {
"d": 5
}
},
"e": [6, 7, 8]
}
becomes:
{"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8}
"""
def transform_element(self, key, value):
if isinstance(value, list):
for li, lv in enumerate(value):
for inner_k, inner_v in self.transform_element(
"%s.%s" % (key, li), lv):
yield inner_k, inner_v
elif isinstance(value, dict):
formatted = self.format_document(value)
for doc_key in formatted:
yield "%s.%s" % (key, doc_key), formatted[doc_key]
else:
# We assume that transform_value will return a 'flat' value,
# not a list or dict
yield key, self.transform_value(value)
def format_document(self, document):
def flatten(doc, path):
top_level = (len(path) == 0)
if not top_level:
path_string = ".".join(path)
for k in doc:
v = doc[k]
if isinstance(v, dict):
path.append(k)
for inner_k, inner_v in flatten(v, path):
yield inner_k, inner_v
path.pop()
else:
transformed = self.transform_element(k, v)
for new_k, new_v in transformed:
if top_level:
yield new_k, new_v
else:
yield "%s.%s" % (path_string, new_k), new_v
return dict(flatten(document, []))
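# Illustrative usage sketch (not part of the original module): flattening the
# example from the class docstring above.
#   DocumentFlattener().format_document(
#       {"a": 2, "b": {"c": {"d": 5}}, "e": [6, 7, 8]})
#   # -> {"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8}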
| apache-2.0 |
jtyuan/racetrack | src/arch/x86/isa/insts/general_purpose/string/compare_strings.py | 91 | 3952 | # Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop CMPS_M_M {
# Find the constant we need to either add or subtract from rdi
ruflag t0, 10
movi t3, t3, dsz, flags=(CEZF,), dataSize=asz
subi t4, t0, dsz, dataSize=asz
mov t3, t3, t4, flags=(nCEZF,), dataSize=asz
ld t1, seg, [1, t0, rsi]
ld t2, es, [1, t0, rdi]
sub t0, t1, t2, flags=(OF, SF, ZF, AF, PF, CF)
add rdi, rdi, t3, dataSize=asz
add rsi, rsi, t3, dataSize=asz
};
#
# Versions which have the rep prefix. These could benefit from some loop
# unrolling.
#
def macroop CMPS_E_M_M {
and t0, rcx, rcx, flags=(EZF,), dataSize=asz
br label("end"), flags=(CEZF,)
# Find the constant we need to either add or subtract from rdi
ruflag t0, 10
movi t3, t3, dsz, flags=(CEZF,), dataSize=asz
subi t4, t0, dsz, dataSize=asz
mov t3, t3, t4, flags=(nCEZF,), dataSize=asz
topOfLoop:
ld t1, seg, [1, t0, rsi]
ld t2, es, [1, t0, rdi]
sub t0, t1, t2, flags=(OF, SF, ZF, AF, PF, CF)
subi rcx, rcx, 1, flags=(EZF,), dataSize=asz
add rdi, rdi, t3, dataSize=asz
add rsi, rsi, t3, dataSize=asz
br label("topOfLoop"), flags=(CSTRZnEZF,)
end:
fault "NoFault"
};
def macroop CMPS_N_M_M {
and t0, rcx, rcx, flags=(EZF,), dataSize=asz
br label("end"), flags=(CEZF,)
# Find the constant we need to either add or subtract from rdi
ruflag t0, 10
movi t3, t3, dsz, flags=(CEZF,), dataSize=asz
subi t4, t0, dsz, dataSize=asz
mov t3, t3, t4, flags=(nCEZF,), dataSize=asz
topOfLoop:
ld t1, seg, [1, t0, rsi]
ld t2, es, [1, t0, rdi]
sub t0, t1, t2, flags=(OF, SF, ZF, AF, PF, CF)
subi rcx, rcx, 1, flags=(EZF,), dataSize=asz
add rdi, rdi, t3, dataSize=asz
add rsi, rsi, t3, dataSize=asz
br label("topOfLoop"), flags=(CSTRnZnEZF,)
end:
fault "NoFault"
};
'''
| bsd-3-clause |
Imaginashion/cloud-vision | .fr-d0BNfn/django-jquery-file-upload/venv/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/gb2312prober.py | 2994 | 1681 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312SMModel
class GB2312Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(GB2312SMModel)
self._mDistributionAnalyzer = GB2312DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "GB2312"
| mit |
jamesgk/robofab | Scripts/RoboFabIntro/intro_GeneratingFonts.py | 8 | 1526 | #FLM: RoboFab Intro, Generating Fonts
#
#
# demo generating fonts with robofab
#
#
# Generating fonts with RoboFab is super easy! Let's have a look.
# (you will need to have a font open in FontLab)
from robofab.world import CurrentFont
import os
# A little function for making folders. We'll need it later.
def makeFolder(path):
#if the path doesn't exist, make it!
if not os.path.exists(path):
os.makedirs(path)
# We need to have a font open for this demo to work
font = CurrentFont()
# This will tell us what folder the font is in
fontPath = os.path.dirname(font.path)
# We'll put the fonts into a folder called "FabFonts" next to the .vfb file
macPath = os.path.join(fontPath, 'FabFonts', 'ForMac')
pcPath = os.path.join(fontPath, 'FabFonts', 'ForPC')
bothPath = os.path.join(fontPath, 'FabFonts', 'ForBoth')
# Now, we'll use that little function we made earlier to make the folders
makeFolder(macPath)
makeFolder(pcPath)
makeFolder(bothPath)
# A dict of all the font types we want to output
fontTypes = { 'mac' : ['mactype1', 'macttf', 'macttdfont'],
'pc' : ['pctype1', 'pcmm'],
'both' : ['otfcff', 'otfttf']
}
# Finally, let's generate the fonts!
for macType in fontTypes['mac']:
print "generating %s..."%macType
font.generate(macType, macPath)
for pcType in fontTypes['pc']:
print "generating %s..."%pcType
font.generate(pcType, pcPath)
for bothType in fontTypes['both']:
print "generating %s..."%bothType
font.generate(bothType, bothPath)
print 'Done!'
# Wow! Could it be any easier than that?
| bsd-3-clause |
matthiasdiener/spack | var/spack/repos/builtin/packages/dealii-parameter-gui/package.py | 1 | 1726 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class DealiiParameterGui(CMakePackage):
    """A Qt-based graphical user interface for editing deal.II .prm parameter
    files."""
homepage = "https://github.com/dealii/parameter_gui"
url = "https://github.com/dealii/parameter_gui"
version('develop', git='https://github.com/dealii/parameter_gui.git', branch='master')
depends_on('qt')
def setup_environment(self, spack_env, run_env):
run_env.set('PARAMETER_GUI_DIR', self.prefix)
| lgpl-2.1 |
symbolicdata/code | src/sdeval/classes/templates/comp/SOL_R_poly_sys/Z3/template_sol.py | 1 | 2437 | """
This is the template for extracting the solution for the computational problem of computing
the real solutions of a polynomial system of equations over RR from the output of the computer
algebra system Z3.
.. moduleauthor:: Albert Heinle <[email protected]>
"""
import xml.dom.minidom as dom
import re
#--------------------------------------------------
#---------------The template-----------------------
#--------------------------------------------------
def extractSolution(outpString):
"""
This function extracts the real solutions of a polynomial system
computed by Z3, using the executable code that was
generated by the template in the same folder on a certain
instance.
It returns a string representation of the solution in XML-format.
    The XML-string will be given as follows::
      <SOL_R_poly_sys_SOL>
        <satisfiable>0 or 1, depending on whether the system is satisfiable</satisfiable>
</SOL_R_poly_sys_SOL>
If there is no solution given, or something is wrong with the given string,
a ValueError is raised.
:param outpString: The String that was returned by the Z3-execution
:type outpString: str
:returns: XML-Representation of the solution.
:rtype: str
:raises: ValueError
"""
if (type(outpString) != str):
raise ValueError("Wrong Type of argument. String type expected.")
solBeginStr = "=====Solution Begin====="
solEndStr = "=====Solution End====="
solBeginPos = outpString.index(solBeginStr) + len(solBeginStr)
solEndStrPos = outpString.index(solEndStr)
solStr = outpString[solBeginPos:solEndStrPos].strip()
    if (solStr == "" or (solStr != "sat" and solStr != "unsat")):
        raise ValueError("Output is empty or not a valid sat/unsat answer.")
    # From here on, we can assume that we are dealing with a valid
    # string.
#Construction of the XML-Document
result = dom.Document()
result.appendChild(result.createElement("SOL_R_poly_sys_SOL"))
tempNode = result.firstChild
    # Adding the satisfiability result
tempNodeSolutions = tempNode.appendChild(result.createElement("satisfiable"))
if solStr == "sat":
tempNodeSolutions.appendChild(result.createTextNode("1"))
else:
tempNodeSolutions.appendChild(result.createTextNode("0"))
return result.toprettyxml(" ")
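# Illustrative usage sketch (not part of the original template): given Z3
# output that contains
#   =====Solution Begin=====
#   sat
#   =====Solution End=====
# extractSolution returns an XML string whose <satisfiable> node holds "1";
# an "unsat" marker yields "0" instead.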
#--------------------------------------------------
#----------------Help Functions--------------------
#--------------------------------------------------
| gpl-3.0 |
Pointedstick/ReplicatorG | skein_engines/skeinforge-31/fabmetheus_utilities/miscellaneous/nophead/layers.py | 23 | 2549 | from vector3 import Vector3
import Image, ImageDraw
def bounding_cube(layers):
min_x = 999999
min_y = 999999
min_z = 999999
max_x = -999999
max_y = -999999
max_z = -999999
for layer in layers:
for thread in layer:
for point in thread:
if point.x > max_x:
max_x = point.x
if point.y > max_y:
max_y = point.y
if point.z > max_z:
max_z = point.z
if point.x < min_x:
min_x = point.x
if point.y < min_y:
min_y = point.y
if point.z < min_z:
min_z = point.z
return Vector3(min_x, min_y, min_z), Vector3(max_x, max_y, max_z)
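# Illustrative sketch (not part of the original module): for threads whose
# points span x in [0, 10], y in [-2, 5] and z in [0, 0.4], bounding_cube
# returns (Vector3(0, -2, 0), Vector3(10, 5, 0.4)), i.e. the min and max
# corners that make_images uses to size and scale the per-layer bitmaps.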
def make_images(layers):
palette = []
for i in xrange(256):
#resistor colour codes
if i == 1:
palette.extend((134, 100, 57)) # brown
elif i == 2:
palette.extend((255, 0, 0)) # red
elif i == 3:
palette.extend((218, 90, 35)) # orange
elif i == 4:
palette.extend((255, 255, 0)) # yellow
elif i == 5:
palette.extend(( 0, 255, 0)) # green
elif i == 6:
palette.extend(( 0, 0, 255)) # blue
elif i == 7:
palette.extend((255, 0, 255)) # purple
else:
palette.extend((i, i, i)) # shades of grey
cube = bounding_cube(layers)
scale = 10
x0 = int(cube[0].x) - 1
y0 = int(cube[0].y) - 1
width = int(round(cube[1].x - x0) + 1) * scale
height = int(round(cube[1].y - y0) + 1) * scale
last_pos = None
images = []
for layer in layers:
image = Image.new('P', (width, height), 255)
image.putpalette(palette)
draw = ImageDraw.Draw(image)
segment = 0
for thread in layer:
if last_pos != None:
draw.line(((( last_pos.x - x0) * scale, height - ( last_pos.y - y0) * scale),
((thread[0].x - x0) * scale, height - (thread[0].y - y0) * scale)), fill = 128)
last_pos = thread[0].copy()
for point in thread[1:]:
draw.line((((last_pos.x - x0) * scale, height - (last_pos.y - y0) * scale),
( (point.x - x0) * scale, height - (point.y - y0) * scale)), fill = segment % 8)
last_pos = point.copy()
segment = segment + 1
images.append(image)
return images
| gpl-2.0 |
wenhuizhang/neutron | neutron/tests/api/test_metering_extensions.py | 47 | 6880 | # Copyright (C) 2014 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest_lib.common.utils import data_utils
from neutron.tests.api import base
from neutron.tests.tempest import test
LOG = logging.getLogger(__name__)
class MeteringTestJSON(base.BaseAdminNetworkTest):
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
List, Show, Create, Delete Metering labels
List, Show, Create, Delete Metering labels rules
"""
@classmethod
def resource_setup(cls):
super(MeteringTestJSON, cls).resource_setup()
if not test.is_extension_enabled('metering', 'network'):
msg = "metering extension not enabled."
raise cls.skipException(msg)
description = "metering label created by tempest"
name = data_utils.rand_name("metering-label")
cls.metering_label = cls.create_metering_label(name, description)
remote_ip_prefix = ("10.0.0.0/24" if cls._ip_version == 4
else "fd02::/64")
direction = "ingress"
cls.metering_label_rule = cls.create_metering_label_rule(
remote_ip_prefix, direction,
metering_label_id=cls.metering_label['id'])
def _delete_metering_label(self, metering_label_id):
# Deletes a label and verifies if it is deleted or not
self.admin_client.delete_metering_label(metering_label_id)
# Asserting that the label is not found in list after deletion
labels = self.admin_client.list_metering_labels(id=metering_label_id)
self.assertEqual(len(labels['metering_labels']), 0)
def _delete_metering_label_rule(self, metering_label_rule_id):
# Deletes a rule and verifies if it is deleted or not
self.admin_client.delete_metering_label_rule(
metering_label_rule_id)
# Asserting that the rule is not found in list after deletion
rules = (self.admin_client.list_metering_label_rules(
id=metering_label_rule_id))
self.assertEqual(len(rules['metering_label_rules']), 0)
@test.attr(type='smoke')
@test.idempotent_id('e2fb2f8c-45bf-429a-9f17-171c70444612')
def test_list_metering_labels(self):
# Verify label filtering
body = self.admin_client.list_metering_labels(id=33)
metering_labels = body['metering_labels']
self.assertEqual(0, len(metering_labels))
@test.attr(type='smoke')
@test.idempotent_id('ec8e15ff-95d0-433b-b8a6-b466bddb1e50')
def test_create_delete_metering_label_with_filters(self):
# Creates a label
name = data_utils.rand_name('metering-label-')
description = "label created by tempest"
body = self.admin_client.create_metering_label(name=name,
description=description)
metering_label = body['metering_label']
self.addCleanup(self._delete_metering_label,
metering_label['id'])
# Assert whether created labels are found in labels list or fail
# if created labels are not found in labels list
labels = (self.admin_client.list_metering_labels(
id=metering_label['id']))
self.assertEqual(len(labels['metering_labels']), 1)
@test.attr(type='smoke')
@test.idempotent_id('30abb445-0eea-472e-bd02-8649f54a5968')
def test_show_metering_label(self):
# Verifies the details of a label
body = self.admin_client.show_metering_label(self.metering_label['id'])
metering_label = body['metering_label']
self.assertEqual(self.metering_label['id'], metering_label['id'])
self.assertEqual(self.metering_label['tenant_id'],
metering_label['tenant_id'])
self.assertEqual(self.metering_label['name'], metering_label['name'])
self.assertEqual(self.metering_label['description'],
metering_label['description'])
@test.attr(type='smoke')
@test.idempotent_id('cc832399-6681-493b-9d79-0202831a1281')
def test_list_metering_label_rules(self):
# Verify rule filtering
body = self.admin_client.list_metering_label_rules(id=33)
metering_label_rules = body['metering_label_rules']
self.assertEqual(0, len(metering_label_rules))
@test.attr(type='smoke')
@test.idempotent_id('f4d547cd-3aee-408f-bf36-454f8825e045')
def test_create_delete_metering_label_rule_with_filters(self):
# Creates a rule
remote_ip_prefix = ("10.0.1.0/24" if self._ip_version == 4
else "fd03::/64")
body = (self.admin_client.create_metering_label_rule(
remote_ip_prefix=remote_ip_prefix,
direction="ingress",
metering_label_id=self.metering_label['id']))
metering_label_rule = body['metering_label_rule']
self.addCleanup(self._delete_metering_label_rule,
metering_label_rule['id'])
# Assert whether created rules are found in rules list or fail
# if created rules are not found in rules list
rules = (self.admin_client.list_metering_label_rules(
id=metering_label_rule['id']))
self.assertEqual(len(rules['metering_label_rules']), 1)
@test.attr(type='smoke')
@test.idempotent_id('b7354489-96ea-41f3-9452-bace120fb4a7')
def test_show_metering_label_rule(self):
# Verifies the details of a rule
body = (self.admin_client.show_metering_label_rule(
self.metering_label_rule['id']))
metering_label_rule = body['metering_label_rule']
self.assertEqual(self.metering_label_rule['id'],
metering_label_rule['id'])
self.assertEqual(self.metering_label_rule['remote_ip_prefix'],
metering_label_rule['remote_ip_prefix'])
self.assertEqual(self.metering_label_rule['direction'],
metering_label_rule['direction'])
self.assertEqual(self.metering_label_rule['metering_label_id'],
metering_label_rule['metering_label_id'])
self.assertFalse(metering_label_rule['excluded'])
class MeteringIpV6TestJSON(MeteringTestJSON):
_ip_version = 6
| apache-2.0 |
nickanderson/ansible | lib/ansible/color.py | 134 | 2388 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys
import constants
ANSIBLE_COLOR=True
if constants.ANSIBLE_NOCOLOR:
ANSIBLE_COLOR=False
elif not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():
ANSIBLE_COLOR=False
else:
try:
import curses
curses.setupterm()
if curses.tigetnum('colors') < 0:
ANSIBLE_COLOR=False
except ImportError:
# curses library was not found
pass
except curses.error:
# curses returns an error (e.g. could not find terminal)
ANSIBLE_COLOR=False
if constants.ANSIBLE_FORCE_COLOR:
ANSIBLE_COLOR=True
# --- begin "pretty"
#
# pretty - A miniature library that provides a Python print and stdout
# wrapper that makes colored terminal text easier to use (e.g. without
# having to mess around with ANSI escape sequences). This code is public
# domain - there is no license except that you must leave this header.
#
# Copyright (C) 2008 Brian Nez <thedude at bri1 dot com>
#
# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
codeCodes = {
'black': '0;30', 'bright gray': '0;37',
'blue': '0;34', 'white': '1;37',
'green': '0;32', 'bright blue': '1;34',
'cyan': '0;36', 'bright green': '1;32',
'red': '0;31', 'bright cyan': '1;36',
'purple': '0;35', 'bright red': '1;31',
'yellow': '0;33', 'bright purple': '1;35',
'dark gray': '1;30', 'bright yellow': '1;33',
'normal': '0'
}
def stringc(text, color):
"""String in color."""
if ANSIBLE_COLOR:
return "\033["+codeCodes[color]+"m"+text+"\033[0m"
else:
return text
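# Illustrative usage sketch (not part of the original module):
#   stringc("ok: [localhost]", "green")   # -> "\033[0;32mok: [localhost]\033[0m"
#   stringc("failed", "bright red")       # -> "\033[1;31mfailed\033[0m"
# When color support is disabled (ANSIBLE_COLOR is False) the text is returned
# unchanged.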
# --- end "pretty"
| gpl-3.0 |
mconstantin/watchdog | tools/bootstrap.py | 8 | 10448 | #
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
#
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import sys
import shutil
import tempfile
try:
import urllib.request as urllib2
except ImportError:
import urllib2
import subprocess
from optparse import OptionParser
if sys.platform == 'win32':
def quote(c):
if ' ' in c:
return '"%s"' % c # work around spawn lamosity on windows
else:
return c
else:
quote = str
# See zc.buildout.easy_install._has_broken_dash_S for motivation and comments.
stdout, stderr = subprocess.Popen(
[sys.executable, '-S', '-c',
'try:\n'
' import pickle\n'
'except ImportError:\n'
' print(1)\n'
'else:\n'
' print(0)\n'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
has_broken_dash_S = bool(int(stdout.strip()))
# In order to be more robust in the face of system Pythons, we want to
# run without site-packages loaded. This is somewhat tricky, in
# particular because Python 2.6's distutils imports site, so starting
# with the -S flag is not sufficient. However, we'll start with that:
if not has_broken_dash_S and 'site' in sys.modules:
# We will restart with python -S.
args = sys.argv[:]
args[0:0] = [sys.executable, '-S']
args = list(map(quote, args))
os.execv(sys.executable, args)
# Now we are running with -S. We'll get the clean sys.path, import site
# because distutils will do it later, and then reset the path and clean
# out any namespace packages from site-packages that might have been
# loaded by .pth files.
clean_path = sys.path[:]
import site
sys.path[:] = clean_path
for k, v in list(sys.modules.items()):
if k in ('setuptools', 'pkg_resources') or (
hasattr(v, '__path__') and
len(v.__path__) == 1 and
not os.path.exists(os.path.join(v.__path__[0], '__init__.py'))):
# This is a namespace package. Remove it.
sys.modules.pop(k)
is_jython = sys.platform.startswith('java')
setuptools_source = 'http://peak.telecommunity.com/dist/ez_setup.py'
distribute_source = 'http://python-distribute.org/distribute_setup.py'
# parsing arguments
def normalize_to_url(option, opt_str, value, parser):
if value:
if '://' not in value: # It doesn't smell like a URL.
value = 'file://%s' % (
urllib2.pathname2url(
os.path.abspath(os.path.expanduser(value))),)
if opt_str == '--download-base' and not value.endswith('/'):
# Download base needs a trailing slash to make the world happy.
value += '/'
else:
value = None
name = opt_str[2:].replace('-', '_')
setattr(parser.values, name, value)
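# Illustrative sketch (not part of the original script): a local path passed to
# --setup-source is rewritten to a file:// URL of its absolute, expanded form,
# and a --download-base value additionally gains a trailing "/" when it lacks
# one, so "/home/user/eggs" (hypothetical path) becomes "file:///home/user/eggs/".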
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --setup-source and --download-base to point to
local resources, you can keep this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", dest="version",
help="use a specific zc.buildout version")
parser.add_option("--setup-version", dest="setup_version",
help="The version of setuptools or distribute to use.")
parser.add_option("-d", "--distribute",
action="store_true", dest="use_distribute",
default=sys.version_info[0] >= 3,
help="Use Distribute rather than Setuptools.")
parser.add_option("--setup-source", action="callback", dest="setup_source",
callback=normalize_to_url, nargs=1, type="string",
help=("Specify a URL or file location for the setup file. "
"If you use Setuptools, this will default to " +
setuptools_source + "; if you use Distribute, this "
"will default to " + distribute_source + "."))
parser.add_option("--download-base", action="callback", dest="download_base",
callback=normalize_to_url, nargs=1, type="string",
help=("Specify a URL or directory for downloading "
"zc.buildout and either Setuptools or Distribute. "
"Defaults to PyPI."))
parser.add_option("--eggs",
help=("Specify a directory for storing eggs. Defaults to "
"a temporary directory that is deleted when the "
"bootstrap script completes."))
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true",
default=sys.version_info[0] > 2,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", None, action="store", dest="config_file",
help=("Specify the path to the buildout configuration "
"file to be used."))
options, args = parser.parse_args()
# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
args += ['-c', options.config_file]
if options.eggs:
eggs_dir = os.path.abspath(os.path.expanduser(options.eggs))
else:
eggs_dir = tempfile.mkdtemp()
if options.setup_source is None:
if options.use_distribute:
options.setup_source = distribute_source
else:
options.setup_source = setuptools_source
if options.accept_buildout_test_releases:
args.append('buildout:accept-buildout-test-releases=true')
args.append('bootstrap')
try:
import pkg_resources
import setuptools # A flag. Sometimes pkg_resources is installed alone.
if not hasattr(pkg_resources, '_distribute'):
raise ImportError
except ImportError:
ez_code = urllib2.urlopen(
options.setup_source).read().replace('\r\n'.encode(), '\n'.encode())
ez = {}
exec(ez_code, ez)
setup_args = dict(to_dir=eggs_dir, download_delay=0)
if options.download_base:
setup_args['download_base'] = options.download_base
if options.setup_version:
setup_args['version'] = options.setup_version
if options.use_distribute:
setup_args['no_fake'] = True
ez['use_setuptools'](**setup_args)
if 'pkg_resources' in sys.modules:
if sys.version_info[0] >= 3:
import imp
reload_ = imp.reload
else:
reload_ = reload
reload_(sys.modules['pkg_resources'])
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
cmd = [quote(sys.executable),
'-c',
quote('from setuptools.command.easy_install import main; main()'),
'-mqNxd',
quote(eggs_dir)]
if not has_broken_dash_S:
cmd.insert(1, '-S')
find_links = options.download_base
if not find_links:
find_links = os.environ.get('bootstrap-testing-find-links')
if find_links:
cmd.extend(['-f', quote(find_links)])
if options.use_distribute:
setup_requirement = 'distribute'
else:
setup_requirement = 'setuptools'
ws = pkg_resources.working_set
setup_requirement_path = ws.find(
pkg_resources.Requirement.parse(setup_requirement)).location
env = dict(
os.environ,
PYTHONPATH=setup_requirement_path)
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setup_requirement_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
if is_jython:
import subprocess
exitcode = subprocess.Popen(cmd, env=env).wait()
else: # Windows prefers this, apparently; otherwise we would prefer subprocess
exitcode = os.spawnle(*([os.P_WAIT, sys.executable] + cmd + [env]))
if exitcode != 0:
sys.stdout.flush()
sys.stderr.flush()
print("An error occurred when trying to install zc.buildout. "
"Look above this message for any errors that "
"were output by easy_install.")
sys.exit(exitcode)
ws.add_entry(eggs_dir)
ws.require(requirement)
import zc.buildout.buildout
zc.buildout.buildout.main(args)
if not options.eggs: # clean up temporary egg directory
shutil.rmtree(eggs_dir)
| apache-2.0 |
MartijnBraam/CouchPotatoServer | libs/pyasn1/codec/cer/decoder.py | 261 | 1230 | # CER decoder
from pyasn1.type import univ
from pyasn1.codec.ber import decoder
from pyasn1.compat.octets import oct2int
from pyasn1 import error
class BooleanDecoder(decoder.AbstractSimpleDecoder):
protoComponent = univ.Boolean(0)
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
if not head:
raise error.PyAsn1Error('Empty substrate')
byte = oct2int(head[0])
# CER/DER specifies encoding of TRUE as 0xFF and FALSE as 0x0, while
# BER allows any non-zero value as TRUE; cf. sections 8.2.2. and 11.1
# in http://www.itu.int/ITU-T/studygroups/com17/languages/X.690-0207.pdf
if byte == 0xff:
value = 1
elif byte == 0x00:
value = 0
else:
raise error.PyAsn1Error('Boolean CER violation: %s' % byte)
return self._createComponent(asn1Spec, tagSet, value), tail
tagMap = decoder.tagMap.copy()
tagMap.update({
univ.Boolean.tagSet: BooleanDecoder()
})
typeMap = decoder.typeMap
class Decoder(decoder.Decoder): pass
decode = Decoder(tagMap, decoder.typeMap)
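# Illustrative sketch (not part of the original module): a BOOLEAN whose single
# content octet is 0xff decodes to Boolean(1) and 0x00 decodes to Boolean(0);
# any other content octet (tolerated by plain BER) raises PyAsn1Error in the
# BooleanDecoder above, because CER/DER restrict the encoding to those values.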
| gpl-3.0 |
AugurProject/sidecoin | contrib/spendfrom/spendfrom.py | 2 | 10094 | #!/usr/bin/env python
#
# Use the raw transactions API to spend sidecoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a sidecoind or Sidecoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the sidecoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Sidecoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Sidecoin")
return os.path.expanduser("~/.sidecoin")
def read_sidecoin_config(dbdir):
"""Read the sidecoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "sidecoin.conf"))))
return dict(config_parser.items("all"))
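# Illustrative sketch (not part of the original script; values hypothetical):
# FakeSecHead prepends a dummy "[all]" section header so that a headerless
# sidecoin.conf such as
#   rpcuser=alice
#   rpcpassword=secret # trailing comments are stripped by readline()
# parses into {'rpcuser': 'alice', 'rpcpassword': 'secret'}.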
def connect_JSON(config):
"""Connect to a sidecoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 18332 if testnet else 8332
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the sidecoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(sidecoind):
info = sidecoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
sidecoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = sidecoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(sidecoind):
address_summary = dict()
address_to_account = dict()
for info in sidecoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = sidecoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = sidecoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-sidecoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
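# Illustrative sketch (not part of the original script): with two unspent
# outputs worth 0.6 and 0.5 and needed=1.1 (amount plus fee), select_coins
# returns both outputs and a change of Decimal("0.0"); with needed=1.0 the
# change would be Decimal("0.1"), which create_tx sends back to the last
# "from" address whenever it exceeds BASE_FEE.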
def create_tx(sidecoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(sidecoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to sidecoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = sidecoind.createrawtransaction(inputs, outputs)
signed_rawtx = sidecoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(sidecoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = sidecoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(sidecoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = sidecoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(sidecoind, txinfo)
        total_out = compute_amount_out(txinfo)
        fee = total_in - total_out  # the fee this transaction actually pays
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get sidecoins from")
parser.add_option("--to", dest="to", default=None,
                      help="address to send sidecoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of sidecoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_sidecoin_config(options.datadir)
if options.testnet: config['testnet'] = True
sidecoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(sidecoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(sidecoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(sidecoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(sidecoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = sidecoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| mit |
Permutatrix/servo | tests/wpt/mozilla/tests/webgl/conformance-2.0.0/py/tex_image_test_generator.py | 27 | 9132 | #!/usr/bin/env python
# Copyright (c) 2015 The Khronos Group Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and/or associated documentation files (the
# "Materials"), to deal in the Materials without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Materials, and to
# permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
#
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
"""
Generator for tex-2d* and tex-3d* tests.
This file needs to be run in its folder.
"""
import os
import os.path
import sys
_LICENSE = """<!--
Copyright (c) 2015 The Khronos Group Inc.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and/or associated documentation files (the
"Materials"), to deal in the Materials without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Materials, and to
permit persons to whom the Materials are furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Materials.
THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
-->
"""
_DO_NOT_EDIT_WARNING = """<!--
This file is auto-generated from py/tex_image_test_generator.py
DO NOT EDIT!
-->
"""
_ELEMENT_TYPES = [
'canvas',
'canvas-sub-rectangle',
'image',
'image-data',
'svg-image',
'video',
'webgl-canvas',
'image-bitmap-from-image-data',
'image-bitmap-from-image',
'image-bitmap-from-video',
'image-bitmap-from-canvas',
'image-bitmap-from-blob',
'image-bitmap-from-image-bitmap'
]
_FORMATS_TYPES_WEBGL1 = [
{'internal_format': 'RGB', 'format': 'RGB', 'type': 'UNSIGNED_BYTE' },
{'internal_format': 'RGB', 'format': 'RGB', 'type': 'UNSIGNED_SHORT_5_6_5' },
{'internal_format': 'RGBA', 'format': 'RGBA', 'type': 'UNSIGNED_BYTE' },
{'internal_format': 'RGBA', 'format': 'RGBA', 'type': 'UNSIGNED_SHORT_4_4_4_4' },
{'internal_format': 'RGBA', 'format': 'RGBA', 'type': 'UNSIGNED_SHORT_5_5_5_1' },
]
_FORMATS_TYPES_WEBGL2 = [
{'internal_format': 'R8', 'format': 'RED', 'type': 'UNSIGNED_BYTE' },
{'internal_format': 'R16F', 'format': 'RED', 'type': 'HALF_FLOAT' },
{'internal_format': 'R16F', 'format': 'RED', 'type': 'FLOAT' },
{'internal_format': 'R32F', 'format': 'RED', 'type': 'FLOAT' },
{'internal_format': 'R8UI', 'format': 'RED_INTEGER', 'type': 'UNSIGNED_BYTE' },
{'internal_format': 'RG8', 'format': 'RG', 'type': 'UNSIGNED_BYTE' },
{'internal_format': 'RG16F', 'format': 'RG', 'type': 'HALF_FLOAT' },
{'internal_format': 'RG16F', 'format': 'RG', 'type': 'FLOAT' },
{'internal_format': 'RG32F', 'format': 'RG', 'type': 'FLOAT' },
{'internal_format': 'RG8UI', 'format': 'RG_INTEGER', 'type': 'UNSIGNED_BYTE' },
{'internal_format': 'RGB8', 'format': 'RGB', 'type': 'UNSIGNED_BYTE' },
{'internal_format': 'SRGB8', 'format': 'RGB', 'type': 'UNSIGNED_BYTE' },
{'internal_format': 'RGB565', 'format': 'RGB', 'type': 'UNSIGNED_BYTE' },
{'internal_format': 'RGB565', 'format': 'RGB', 'type': 'UNSIGNED_SHORT_5_6_5' },
{'internal_format': 'R11F_G11F_B10F', 'format': 'RGB', 'type': 'UNSIGNED_INT_10F_11F_11F_REV' },
{'internal_format': 'R11F_G11F_B10F', 'format': 'RGB', 'type': 'HALF_FLOAT' },
{'internal_format': 'R11F_G11F_B10F', 'format': 'RGB', 'type': 'FLOAT' },
{'internal_format': 'RGB9_E5', 'format': 'RGB', 'type': 'HALF_FLOAT' },
{'internal_format': 'RGB9_E5', 'format': 'RGB', 'type': 'FLOAT' },
{'internal_format': 'RGB16F', 'format': 'RGB', 'type': 'HALF_FLOAT' },
{'internal_format': 'RGB16F', 'format': 'RGB', 'type': 'FLOAT' },
{'internal_format': 'RGB32F', 'format': 'RGB', 'type': 'FLOAT' },
{'internal_format': 'RGB8UI', 'format': 'RGB_INTEGER', 'type': 'UNSIGNED_BYTE' },
{'internal_format': 'RGBA8', 'format': 'RGBA', 'type': 'UNSIGNED_BYTE' },
{'internal_format': 'SRGB8_ALPHA8', 'format': 'RGBA', 'type': 'UNSIGNED_BYTE' },
{'internal_format': 'RGB5_A1', 'format': 'RGBA', 'type': 'UNSIGNED_BYTE' },
{'internal_format': 'RGB5_A1', 'format': 'RGBA', 'type': 'UNSIGNED_SHORT_5_5_5_1' },
{'internal_format': 'RGBA4', 'format': 'RGBA', 'type': 'UNSIGNED_BYTE' },
{'internal_format': 'RGBA4', 'format': 'RGBA', 'type': 'UNSIGNED_SHORT_4_4_4_4' },
{'internal_format': 'RGBA16F', 'format': 'RGBA', 'type': 'HALF_FLOAT' },
{'internal_format': 'RGBA16F', 'format': 'RGBA', 'type': 'FLOAT' },
{'internal_format': 'RGBA32F', 'format': 'RGBA', 'type': 'FLOAT' },
{'internal_format': 'RGBA8UI', 'format': 'RGBA_INTEGER', 'type': 'UNSIGNED_BYTE' },
]
def GenerateFilename(dimension, element_type, internal_format, format, type):
"""Generate test filename."""
filename = ("tex-" + dimension + "d-" +
internal_format + "-" + format + "-" + type + ".html")
return filename.lower()
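# Illustrative sketch (not part of the original generator): for dimension '2',
# internal_format 'RGBA', format 'RGBA' and type 'UNSIGNED_BYTE' the helper
# yields "tex-2d-rgba-rgba-unsigned_byte.html"; the element_type argument does
# not affect the filename, since the per-element output directory provides that
# context.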
def WriteTest(filename, dimension, element_type, internal_format, format, type, default_context_version):
"""Write one test."""
file = open(filename, "wb")
file.write(_LICENSE)
file.write(_DO_NOT_EDIT_WARNING)
code = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link rel="stylesheet" href="../../../resources/js-test-style.css"/>
<script src="../../../js/js-test-pre.js"></script>
<script src="../../../js/webgl-test-utils.js"></script>
<script src="../../../js/tests/tex-image-and-sub-image-utils.js"></script>"""
if element_type == 'image-bitmap-from-image-data' or element_type == 'image-bitmap-from-image' or \
element_type == 'image-bitmap-from-video' or element_type == 'image-bitmap-from-canvas' or \
element_type == 'image-bitmap-from-blob' or element_type == 'image-bitmap-from-image-bitmap':
code += """
<script src="../../../js/tests/tex-image-and-sub-image-with-image-bitmap-utils.js"></script>"""
code += """
<script src="../../../js/tests/tex-image-and-sub-image-%(dimension)sd-with-%(element_type)s.js"></script>
</head>
<body>"""
if element_type == 'image-data':
code += """
<canvas id="texcanvas" width="2" height="2"></canvas>"""
code += """
<canvas id="example" width="32" height="32"></canvas>"""
code += """
<div id="description"></div>
<div id="console"></div>
<script>
"use strict";
function testPrologue(gl) {
return true;
}
generateTest("%(internal_format)s", "%(format)s", "%(type)s", testPrologue, "../../../resources/", %(default_context_version)s)();
</script>
</body>
</html>
"""
file.write(code % {
'dimension': dimension,
'element_type': element_type,
'internal_format': internal_format,
'format': format,
'type': type,
'default_context_version': default_context_version,
})
file.close()
def GenerateTests(test_dir, test_cases, dimension, default_context_version):
test_dir_template = test_dir + '/%s'
for element_type in _ELEMENT_TYPES:
os.chdir(test_dir_template % element_type.replace('-', '_'))
if dimension == '3':
# Assume we write 2D tests first.
index_file = open("00_test_list.txt", "ab")
else:
index_file = open("00_test_list.txt", "wb")
for tex_info in test_cases:
internal_format = tex_info['internal_format']
format = tex_info['format']
type = tex_info['type']
filename = GenerateFilename(dimension, element_type, internal_format, format, type)
index_file.write(filename)
index_file.write('\n')
WriteTest(filename, dimension, element_type, internal_format, format, type, default_context_version)
index_file.close();
def main(argv):
"""This is the main function."""
py_dir = os.path.dirname(os.path.realpath(__file__))
GenerateTests(os.path.realpath(py_dir + '/../conformance/textures'), _FORMATS_TYPES_WEBGL1, '2', '1')
GenerateTests(os.path.realpath(py_dir + '/../conformance2/textures'), _FORMATS_TYPES_WEBGL2, '2', '2')
GenerateTests(os.path.realpath(py_dir + '/../conformance2/textures'), _FORMATS_TYPES_WEBGL2, '3', '2')
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mpl-2.0 |
stanbellcom/webapp_politik | sites/all/modules/annotator/lib/store-plugin/pyenv/lib/python2.7/site-packages/pyes/pyesthrift/ttypes.py | 2 | 11651 | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import *
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Method:
GET = 0
PUT = 1
POST = 2
DELETE = 3
HEAD = 4
OPTIONS = 5
_VALUES_TO_NAMES = {
0: "GET",
1: "PUT",
2: "POST",
3: "DELETE",
4: "HEAD",
5: "OPTIONS",
}
_NAMES_TO_VALUES = {
"GET": 0,
"PUT": 1,
"POST": 2,
"DELETE": 3,
"HEAD": 4,
"OPTIONS": 5,
}
class Status:
CONT = 100
SWITCHING_PROTOCOLS = 101
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIED = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
INSUFFICIENT_STORAGE = 506
_VALUES_TO_NAMES = {
100: "CONT",
101: "SWITCHING_PROTOCOLS",
200: "OK",
201: "CREATED",
202: "ACCEPTED",
203: "NON_AUTHORITATIVE_INFORMATION",
204: "NO_CONTENT",
205: "RESET_CONTENT",
206: "PARTIAL_CONTENT",
207: "MULTI_STATUS",
300: "MULTIPLE_CHOICES",
301: "MOVED_PERMANENTLY",
302: "FOUND",
303: "SEE_OTHER",
304: "NOT_MODIFIED",
305: "USE_PROXY",
307: "TEMPORARY_REDIRECT",
400: "BAD_REQUEST",
401: "UNAUTHORIZED",
402: "PAYMENT_REQUIRED",
403: "FORBIDDEN",
404: "NOT_FOUND",
405: "METHOD_NOT_ALLOWED",
406: "NOT_ACCEPTABLE",
407: "PROXY_AUTHENTICATION",
408: "REQUEST_TIMEOUT",
409: "CONFLICT",
410: "GONE",
411: "LENGTH_REQUIRED",
412: "PRECONDITION_FAILED",
413: "REQUEST_ENTITY_TOO_LARGE",
414: "REQUEST_URI_TOO_LONG",
415: "UNSUPPORTED_MEDIA_TYPE",
416: "REQUESTED_RANGE_NOT_SATISFIED",
417: "EXPECTATION_FAILED",
422: "UNPROCESSABLE_ENTITY",
423: "LOCKED",
424: "FAILED_DEPENDENCY",
500: "INTERNAL_SERVER_ERROR",
501: "NOT_IMPLEMENTED",
502: "BAD_GATEWAY",
503: "SERVICE_UNAVAILABLE",
504: "GATEWAY_TIMEOUT",
506: "INSUFFICIENT_STORAGE",
}
_NAMES_TO_VALUES = {
"CONT": 100,
"SWITCHING_PROTOCOLS": 101,
"OK": 200,
"CREATED": 201,
"ACCEPTED": 202,
"NON_AUTHORITATIVE_INFORMATION": 203,
"NO_CONTENT": 204,
"RESET_CONTENT": 205,
"PARTIAL_CONTENT": 206,
"MULTI_STATUS": 207,
"MULTIPLE_CHOICES": 300,
"MOVED_PERMANENTLY": 301,
"FOUND": 302,
"SEE_OTHER": 303,
"NOT_MODIFIED": 304,
"USE_PROXY": 305,
"TEMPORARY_REDIRECT": 307,
"BAD_REQUEST": 400,
"UNAUTHORIZED": 401,
"PAYMENT_REQUIRED": 402,
"FORBIDDEN": 403,
"NOT_FOUND": 404,
"METHOD_NOT_ALLOWED": 405,
"NOT_ACCEPTABLE": 406,
"PROXY_AUTHENTICATION": 407,
"REQUEST_TIMEOUT": 408,
"CONFLICT": 409,
"GONE": 410,
"LENGTH_REQUIRED": 411,
"PRECONDITION_FAILED": 412,
"REQUEST_ENTITY_TOO_LARGE": 413,
"REQUEST_URI_TOO_LONG": 414,
"UNSUPPORTED_MEDIA_TYPE": 415,
"REQUESTED_RANGE_NOT_SATISFIED": 416,
"EXPECTATION_FAILED": 417,
"UNPROCESSABLE_ENTITY": 422,
"LOCKED": 423,
"FAILED_DEPENDENCY": 424,
"INTERNAL_SERVER_ERROR": 500,
"NOT_IMPLEMENTED": 501,
"BAD_GATEWAY": 502,
"SERVICE_UNAVAILABLE": 503,
"GATEWAY_TIMEOUT": 504,
"INSUFFICIENT_STORAGE": 506,
}
class RestRequest:
"""
Attributes:
- method
- uri
- parameters
- headers
- body
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'method', None, None, ), # 1
(2, TType.STRING, 'uri', None, None, ), # 2
(3, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 3
(4, TType.MAP, 'headers', (TType.STRING,None,TType.STRING,None), None, ), # 4
(5, TType.STRING, 'body', None, None, ), # 5
)
def __init__(self, method=None, uri=None, parameters=None, headers=None, body=None,):
self.method = method
self.uri = uri
self.parameters = parameters
self.headers = headers
self.body = body
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.method = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.uri = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.parameters = {}
(_ktype1, _vtype2, _size0 ) = iprot.readMapBegin()
for _i4 in xrange(_size0):
_key5 = iprot.readString();
_val6 = iprot.readString();
self.parameters[_key5] = _val6
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.headers = {}
(_ktype8, _vtype9, _size7 ) = iprot.readMapBegin()
for _i11 in xrange(_size7):
_key12 = iprot.readString();
_val13 = iprot.readString();
self.headers[_key12] = _val13
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.body = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('RestRequest')
if self.method != None:
oprot.writeFieldBegin('method', TType.I32, 1)
oprot.writeI32(self.method)
oprot.writeFieldEnd()
if self.uri != None:
oprot.writeFieldBegin('uri', TType.STRING, 2)
oprot.writeString(self.uri)
oprot.writeFieldEnd()
if self.parameters != None:
oprot.writeFieldBegin('parameters', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters))
for kiter14,viter15 in self.parameters.items():
oprot.writeString(kiter14)
oprot.writeString(viter15)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.headers != None:
oprot.writeFieldBegin('headers', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers))
for kiter16,viter17 in self.headers.items():
oprot.writeString(kiter16)
oprot.writeString(viter17)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.body != None:
oprot.writeFieldBegin('body', TType.STRING, 5)
oprot.writeString(self.body)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.method is None:
raise TProtocol.TProtocolException(message='Required field method is unset!')
if self.uri is None:
raise TProtocol.TProtocolException(message='Required field uri is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class RestResponse:
"""
Attributes:
- status
- headers
- body
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'status', None, None, ), # 1
(2, TType.MAP, 'headers', (TType.STRING,None,TType.STRING,None), None, ), # 2
(3, TType.STRING, 'body', None, None, ), # 3
)
def __init__(self, status=None, headers=None, body=None,):
self.status = status
self.headers = headers
self.body = body
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.status = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.headers = {}
(_ktype19, _vtype20, _size18 ) = iprot.readMapBegin()
for _i22 in xrange(_size18):
_key23 = iprot.readString();
_val24 = iprot.readString();
self.headers[_key23] = _val24
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.body = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('RestResponse')
if self.status != None:
oprot.writeFieldBegin('status', TType.I32, 1)
oprot.writeI32(self.status)
oprot.writeFieldEnd()
if self.headers != None:
oprot.writeFieldBegin('headers', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers))
for kiter25,viter26 in self.headers.items():
oprot.writeString(kiter25)
oprot.writeString(viter26)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.body != None:
oprot.writeFieldBegin('body', TType.STRING, 3)
oprot.writeString(self.body)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| gpl-2.0 |
JohnOrlando/gnuradio-bitshark | gnuradio-core/src/python/gnuradio/blks2impl/dqpsk.py | 9 | 14686 | #
# Copyright 2005,2006,2007,2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# See gnuradio-examples/python/digital for examples
"""
differential QPSK modulation and demodulation.
"""
from gnuradio import gr, gru, modulation_utils
from math import pi, sqrt
import psk
import cmath
from pprint import pprint
# default values (used in __init__ and add_options)
_def_samples_per_symbol = 2
_def_excess_bw = 0.35
_def_gray_code = True
_def_verbose = False
_def_log = False
_def_costas_alpha = 0.15
_def_gain_mu = None
_def_mu = 0.5
_def_omega_relative_limit = 0.005
# /////////////////////////////////////////////////////////////////////////////
# DQPSK modulator
# /////////////////////////////////////////////////////////////////////////////
class dqpsk_mod(gr.hier_block2):
def __init__(self,
samples_per_symbol=_def_samples_per_symbol,
excess_bw=_def_excess_bw,
gray_code=_def_gray_code,
verbose=_def_verbose,
log=_def_log):
"""
        Hierarchical block for RRC-filtered DQPSK modulation.
The input is a byte stream (unsigned char) and the
output is the complex modulated signal at baseband.
@param samples_per_symbol: samples per symbol >= 2
@type samples_per_symbol: integer
@param excess_bw: Root-raised cosine filter excess bandwidth
@type excess_bw: float
@param gray_code: Tell modulator to Gray code the bits
@type gray_code: bool
@param verbose: Print information about modulator?
@type verbose: bool
        @param log: Print modulation data to files?
        @type log: bool
"""
gr.hier_block2.__init__(self, "dqpsk_mod",
gr.io_signature(1, 1, gr.sizeof_char), # Input signature
gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
self._samples_per_symbol = samples_per_symbol
self._excess_bw = excess_bw
self._gray_code = gray_code
if not isinstance(samples_per_symbol, int) or samples_per_symbol < 2:
raise TypeError, ("sbp must be an integer >= 2, is %d" % samples_per_symbol)
ntaps = 11 * samples_per_symbol
arity = pow(2,self.bits_per_symbol())
# turn bytes into k-bit vectors
self.bytes2chunks = \
gr.packed_to_unpacked_bb(self.bits_per_symbol(), gr.GR_MSB_FIRST)
if self._gray_code:
self.symbol_mapper = gr.map_bb(psk.binary_to_gray[arity])
else:
self.symbol_mapper = gr.map_bb(psk.binary_to_ungray[arity])
self.diffenc = gr.diff_encoder_bb(arity)
rot = .707 + .707j
rotated_const = map(lambda pt: pt * rot, psk.constellation[arity])
self.chunks2symbols = gr.chunks_to_symbols_bc(rotated_const)
# pulse shaping filter
self.rrc_taps = gr.firdes.root_raised_cosine(
self._samples_per_symbol, # gain (sps since we're interpolating by sps)
self._samples_per_symbol, # sampling rate
1.0, # symbol rate
self._excess_bw, # excess bandwidth (roll-off factor)
ntaps)
self.rrc_filter = gr.interp_fir_filter_ccf(self._samples_per_symbol, self.rrc_taps)
if verbose:
self._print_verbage()
if log:
self._setup_logging()
# Connect & Initialize base class
self.connect(self, self.bytes2chunks, self.symbol_mapper, self.diffenc,
self.chunks2symbols, self.rrc_filter, self)
def samples_per_symbol(self):
return self._samples_per_symbol
def bits_per_symbol(self=None): # staticmethod that's also callable on an instance
return 2
bits_per_symbol = staticmethod(bits_per_symbol) # make it a static method. RTFM
def _print_verbage(self):
print "\nModulator:"
print "bits per symbol: %d" % self.bits_per_symbol()
print "Gray code: %s" % self._gray_code
print "RRS roll-off factor: %f" % self._excess_bw
def _setup_logging(self):
print "Modulation logging turned on."
self.connect(self.bytes2chunks,
gr.file_sink(gr.sizeof_char, "tx_bytes2chunks.dat"))
self.connect(self.symbol_mapper,
gr.file_sink(gr.sizeof_char, "tx_graycoder.dat"))
self.connect(self.diffenc,
gr.file_sink(gr.sizeof_char, "tx_diffenc.dat"))
self.connect(self.chunks2symbols,
gr.file_sink(gr.sizeof_gr_complex, "tx_chunks2symbols.dat"))
self.connect(self.rrc_filter,
gr.file_sink(gr.sizeof_gr_complex, "tx_rrc_filter.dat"))
def add_options(parser):
"""
Adds QPSK modulation-specific options to the standard parser
"""
parser.add_option("", "--excess-bw", type="float", default=_def_excess_bw,
help="set RRC excess bandwith factor [default=%default] (PSK)")
parser.add_option("", "--no-gray-code", dest="gray_code",
action="store_false", default=_def_gray_code,
help="disable gray coding on modulated bits (PSK)")
add_options=staticmethod(add_options)
def extract_kwargs_from_options(options):
"""
Given command line options, create dictionary suitable for passing to __init__
"""
return modulation_utils.extract_kwargs_from_options(dqpsk_mod.__init__,
('self',), options)
extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)
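# Illustrative usage sketch (assumed GNU Radio 3.x flowgraph API, not taken
# from this file): dqpsk_mod consumes packed bytes and produces complex
# baseband samples, e.g.
#
#   tb = gr.top_block()
#   src = gr.vector_source_b([0x55, 0xAA] * 100, False)
#   mod = dqpsk_mod(samples_per_symbol=4, excess_bw=0.35)
#   sink = gr.null_sink(gr.sizeof_gr_complex)
#   tb.connect(src, mod, sink)
#   tb.run()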
# /////////////////////////////////////////////////////////////////////////////
# DQPSK demodulator
#
# Differentially coherent detection of differentially encoded qpsk
# /////////////////////////////////////////////////////////////////////////////
class dqpsk_demod(gr.hier_block2):
def __init__(self,
samples_per_symbol=_def_samples_per_symbol,
excess_bw=_def_excess_bw,
costas_alpha=_def_costas_alpha,
gain_mu=_def_gain_mu,
mu=_def_mu,
omega_relative_limit=_def_omega_relative_limit,
gray_code=_def_gray_code,
verbose=_def_verbose,
log=_def_log):
"""
Hierarchical block for RRC-filtered DQPSK demodulation
The input is the complex modulated signal at baseband.
The output is a stream of bits packed 1 bit per byte (LSB)
@param samples_per_symbol: samples per symbol >= 2
@type samples_per_symbol: float
@param excess_bw: Root-raised cosine filter excess bandwidth
@type excess_bw: float
@param costas_alpha: loop filter gain
@type costas_alphas: float
@param gain_mu: for M&M block
@type gain_mu: float
@param mu: for M&M block
@type mu: float
@param omega_relative_limit: for M&M block
@type omega_relative_limit: float
@param gray_code: Tell modulator to Gray code the bits
@type gray_code: bool
        @param verbose: Print information about demodulator?
        @type verbose: bool
        @param log: Print demodulation data to files?
        @type log: bool
"""
gr.hier_block2.__init__(self, "dqpsk_demod",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
gr.io_signature(1, 1, gr.sizeof_char)) # Output signature
self._samples_per_symbol = samples_per_symbol
self._excess_bw = excess_bw
self._costas_alpha = costas_alpha
self._mm_gain_mu = gain_mu
self._mm_mu = mu
self._mm_omega_relative_limit = omega_relative_limit
self._gray_code = gray_code
if samples_per_symbol < 2:
raise TypeError, "sbp must be >= 2, is %d" % samples_per_symbol
arity = pow(2,self.bits_per_symbol())
# Automatic gain control
scale = (1.0/16384.0)
self.pre_scaler = gr.multiply_const_cc(scale) # scale the signal from full-range to +-1
#self.agc = gr.agc2_cc(0.6e-1, 1e-3, 1, 1, 100)
self.agc = gr.feedforward_agc_cc(16, 2.0)
# RRC data filter
ntaps = 11 * samples_per_symbol
self.rrc_taps = gr.firdes.root_raised_cosine(
1.0, # gain
self._samples_per_symbol, # sampling rate
1.0, # symbol rate
self._excess_bw, # excess bandwidth (roll-off factor)
ntaps)
self.rrc_filter=gr.interp_fir_filter_ccf(1, self.rrc_taps)
if not self._mm_gain_mu:
sbs_to_mm = {2: 0.050, 3: 0.075, 4: 0.11, 5: 0.125, 6: 0.15, 7: 0.15}
self._mm_gain_mu = sbs_to_mm[samples_per_symbol]
self._mm_omega = self._samples_per_symbol
self._mm_gain_omega = .25 * self._mm_gain_mu * self._mm_gain_mu
self._costas_beta = 0.25 * self._costas_alpha * self._costas_alpha
fmin = -0.25
fmax = 0.25
self.receiver=gr.mpsk_receiver_cc(arity, pi/4.0,
self._costas_alpha, self._costas_beta,
fmin, fmax,
self._mm_mu, self._mm_gain_mu,
self._mm_omega, self._mm_gain_omega,
self._mm_omega_relative_limit)
# Perform Differential decoding on the constellation
self.diffdec = gr.diff_phasor_cc()
# find closest constellation point
rot = 1
rotated_const = map(lambda pt: pt * rot, psk.constellation[arity])
self.slicer = gr.constellation_decoder_cb(rotated_const, range(arity))
if self._gray_code:
self.symbol_mapper = gr.map_bb(psk.gray_to_binary[arity])
else:
self.symbol_mapper = gr.map_bb(psk.ungray_to_binary[arity])
# unpack the k bit vector into a stream of bits
self.unpack = gr.unpack_k_bits_bb(self.bits_per_symbol())
if verbose:
self._print_verbage()
if log:
self._setup_logging()
# Connect & Initialize base class
self.connect(self, self.pre_scaler, self.agc, self.rrc_filter, self.receiver,
self.diffdec, self.slicer, self.symbol_mapper, self.unpack, self)
def samples_per_symbol(self):
return self._samples_per_symbol
def bits_per_symbol(self=None): # staticmethod that's also callable on an instance
return 2
bits_per_symbol = staticmethod(bits_per_symbol) # make it a static method. RTFM
def _print_verbage(self):
print "\nDemodulator:"
print "bits per symbol: %d" % self.bits_per_symbol()
print "Gray code: %s" % self._gray_code
print "RRC roll-off factor: %.2f" % self._excess_bw
print "Costas Loop alpha: %.2e" % self._costas_alpha
print "Costas Loop beta: %.2e" % self._costas_beta
print "M&M mu: %.2f" % self._mm_mu
print "M&M mu gain: %.2e" % self._mm_gain_mu
print "M&M omega: %.2f" % self._mm_omega
print "M&M omega gain: %.2e" % self._mm_gain_omega
print "M&M omega limit: %.2f" % self._mm_omega_relative_limit
def _setup_logging(self):
print "Modulation logging turned on."
self.connect(self.pre_scaler,
gr.file_sink(gr.sizeof_gr_complex, "rx_prescaler.dat"))
self.connect(self.agc,
gr.file_sink(gr.sizeof_gr_complex, "rx_agc.dat"))
self.connect(self.rrc_filter,
gr.file_sink(gr.sizeof_gr_complex, "rx_rrc_filter.dat"))
self.connect(self.receiver,
gr.file_sink(gr.sizeof_gr_complex, "rx_receiver.dat"))
self.connect(self.diffdec,
gr.file_sink(gr.sizeof_gr_complex, "rx_diffdec.dat"))
self.connect(self.slicer,
gr.file_sink(gr.sizeof_char, "rx_slicer.dat"))
self.connect(self.symbol_mapper,
gr.file_sink(gr.sizeof_char, "rx_gray_decoder.dat"))
self.connect(self.unpack,
gr.file_sink(gr.sizeof_char, "rx_unpack.dat"))
def add_options(parser):
"""
Adds modulation-specific options to the standard parser
"""
parser.add_option("", "--excess-bw", type="float", default=_def_excess_bw,
help="set RRC excess bandwith factor [default=%default] (PSK)")
parser.add_option("", "--no-gray-code", dest="gray_code",
action="store_false", default=_def_gray_code,
help="disable gray coding on modulated bits (PSK)")
parser.add_option("", "--costas-alpha", type="float", default=_def_costas_alpha,
help="set Costas loop alpha value [default=%default] (PSK)")
parser.add_option("", "--gain-mu", type="float", default=_def_gain_mu,
help="set M&M symbol sync loop gain mu value [default=%default] (PSK)")
parser.add_option("", "--mu", type="float", default=_def_mu,
help="set M&M symbol sync loop mu value [default=%default] (PSK)")
add_options=staticmethod(add_options)
def extract_kwargs_from_options(options):
"""
Given command line options, create dictionary suitable for passing to __init__
"""
return modulation_utils.extract_kwargs_from_options(
dqpsk_demod.__init__, ('self',), options)
extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)
#
# Add these to the mod/demod registry
#
modulation_utils.add_type_1_mod('dqpsk', dqpsk_mod)
modulation_utils.add_type_1_demod('dqpsk', dqpsk_demod)
| gpl-3.0 |
bollu/sandhi | modules/gr36/docs/sphinx/hieroglyph/nodes.py | 25 | 7930 | __author__ = 'Robert Smallshire'
class Node(object):
def __init__(self, indent=None, lines=None, parent=None):
if indent is not None:
self.indent = indent
else:
self.indent = 0
if lines is not None:
self.lines = lines
else:
self.lines = []
self._parent = parent
self.children = []
parent = property(lambda self: self._parent)
def add_child(self, child):
assert(child.parent is self)
self.children.append(child)
def __repr__(self):
return "Node(" + repr(self.indent) + ", " + repr(self.lines) + ", children=" + repr(self.children) + ")"
def render_rst(self, *args, **kwargs):
result = []
prefix = ' ' * self.indent
result.extend(prefix + line for line in self.lines)
for child in self.children:
result.extend(child.render_rst())
return result
class Arg(Node):
def __init__(self, indent, child_indent, name):
super(Arg, self).__init__(indent)
self.child_indent = child_indent
self.name = name
self.type = None
def __repr__(self):
return "Arg(" + repr(self.name) + ", " + repr(self.type) + ", children=" + repr(self.children) + ")"
def render_rst(self, *args, **kwargs):
result = []
indent = ' ' * self.indent
# Render the param description
description = []
for child in self.children:
child_lines = child.render_rst()
description.extend(child_lines)
dedent = self.child_indent - self.indent
name = self.name.replace('*', r'\*')
first_description = description[0].lstrip() if len(description) else ''
if not first_description:
# TODO: Emit a warning about a missing argument description
pass
result.append("{indent}:param {name}: {first_description}".format(indent=indent, name=name,
first_description=first_description))
dedented_body = [line[dedent:] for line in description[1:]]
result.extend(dedented_body)
# If a type was specified render the type
if self.type is not None:
result.append("{indent}:type {name}: {type}".format(indent=indent, name=self.name, type=self.type))
result.append('')
ensure_terminal_blank(result)
return result
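# Illustrative example (hypothetical values, not part of the module): an Arg
# built as
#
#   arg = Arg(4, 8, 'timeout')
#   arg.type = 'int'
#   arg.add_child(Node(8, ['Seconds to wait.'], parent=arg))
#
# renders via arg.render_rst() to ":param timeout: Seconds to wait." and
# ":type timeout: int" (each indented four spaces), followed by a blank line.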
class Raises(Node):
def __init__(self, indent=None):
super(Raises, self).__init__(indent=indent)
def __repr__(self):
return "Raises(" + repr(self.indent) + ", children=" + repr(self.children) + ")"
def render_rst(self, *args, **kwargs):
result = []
indent = ' ' * self.indent
result.append(indent + ':raises:')
for child in self.children:
result.extend(child.render_rst(only_child=len(self.children) == 1))
ensure_terminal_blank(result)
return result
class Except(Node):
def __init__(self, indent, type):
super(Except, self).__init__(indent=indent)
#self.child_indent = child_indent
self.type = type
def __repr__(self):
return "Except(" + repr(self.type) + ", children=" + repr(self.children) + ")"
def render_rst(self, only_child=False, *args, **kwargs):
result = []
indent = ' ' * self.indent
# Render the param description
description = []
for child in self.children:
child_lines = child.render_rst()
description.extend(child_lines)
#dedent = self.child_indent - self.indent
bullet = '* ' if not only_child else ''
first_description = description[0].lstrip() if len(description) else ''
result.append("{indent}{bullet}{type} - {first_description}".format(indent=indent,
bullet=bullet, type=self.type,
first_description=first_description))
#dedented_body = [' ' * len(bullet) + line[dedent:] for line in description[1:]]
#result.extend(dedented_body)
result.extend(description[1:])
ensure_terminal_blank(result)
return result
class Returns(Node):
def __init__(self, indent):
super(Returns, self).__init__(indent=indent)
self.title = 'Returns'
self.line = ''
def __repr__(self):
return "Returns(" + str(self.indent) + ", children=" + str(self.children) + ")"
def render_rst(self, *args, **kwargs):
result = []
indent = ' ' * self.indent
# Render the param description
description = [self.line] if self.line else []
for child in self.children:
child_lines = child.render_rst()
description.extend(child_lines)
self.render_title(description, indent, result)
result.extend(description[1:])
ensure_terminal_blank(result)
return result
def render_title(self, description, indent, result):
result.append(
"{indent}:returns: {first_description}".format(indent=indent,
first_description=description[0].lstrip()))
class Warning(Node):
def __init__(self, indent):
        super(Warning, self).__init__(indent=indent)
        self.line = ''  # render_rst below reads self.line; mirrors Note
def __repr__(self):
return "Warning(" + repr(self.indent) + ", children=" + str(self.children) + ")"
def render_rst(self, *args, **kwargs):
# TODO: Factor out the commonality between this and Note below
result = []
indent = ' ' * self.indent
# Render the param description
description = [self.line] if self.line else []
for child in self.children:
child_lines = child.render_rst()
description.extend(child_lines)
# Fix the indent on the first line
if len(description) > 1 and len(description[1].strip()) != 0:
body_indent = len(description[1]) - len(description[1].strip())
else:
body_indent = self.indent + 4
if len(description) > 0:
description[0] = ' ' * body_indent + description[0]
result.append(indent + ".. warning::")
result.append(indent + '')
result.extend(description)
ensure_terminal_blank(result)
return result
class Note(Node):
def __init__(self, indent):
super(Note, self).__init__(indent=indent)
self.line = ''
def __repr__(self):
return "Note(" + repr(self.indent) + ", children=" + str(self.children) + ")"
def render_rst(self, *args, **kwargs):
# TODO: Factor out the commonality between this and Warning above
result = []
indent = ' ' * self.indent
# Render the param description
description = [self.line] if self.line else []
for child in self.children:
child_lines = child.render_rst()
description.extend(child_lines)
# Fix the indent on the first line
if len(description) > 1 and len(description[1].strip()) != 0:
body_indent = len(description[1]) - len(description[1].strip())
else:
body_indent = self.indent + 4
if len(description) > 0:
description[0] = ' ' * body_indent + description[0]
result.append(indent + ".. note::")
result.append(indent + '')
result.extend(description)
ensure_terminal_blank(result)
return result
def ensure_terminal_blank(result):
'''If the description didn't end with a blank line add one here.'''
if len(result) > 0:
if len(result[-1].strip()) != 0:
result.append('')
| gpl-3.0 |
magicrub/mavlink | pymavlink/generator/lib/genxmlif/xmlifBase.py | 82 | 5371 | #
# genxmlif, Release 0.9.0
# file: xmlifbase.py
#
# XML interface base classes
#
# history:
# 2005-04-25 rl created
# 2006-08-18 rl some methods for XML schema validation support added
# 2007-05-25 rl performance optimization (caching) added, bugfixes for XPath handling
# 2007-07-04 rl complete re-design, API classes moved to xmlifApi.py
#
# Copyright (c) 2005-2008 by Roland Leuthe. All rights reserved.
#
# --------------------------------------------------------------------
# The generic XML interface is
#
# Copyright (c) 2005-2008 by Roland Leuthe
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
__author__ = "Roland Leuthe <[email protected]>"
__date__ = "28 July 2008"
__version__ = "0.9"
from xml.dom import XML_NAMESPACE, XMLNS_NAMESPACE
from xmlifUtils import NsNameTupleFactory, convertToAbsUrl
########################################
# XmlIf builder extension base class
# All not implemented methods have to be overloaded by the derived class!!
#
class XmlIfBuilderExtensionBase:
"""XmlIf builder extension base class.
This class provides additional data (e.g. line numbers or caches)
for an element node which are stored in the element node object during parsing.
"""
def __init__ (self, filePath, absUrl, treeWrapper, elementWrapperClass):
"""Constructor for this class
Input parameter:
filePath: contains the file path of the corresponding XML file
absUrl: contains the absolute URL of the corresponding XML file
"""
self.filePath = filePath
self.absUrl = absUrl
self.baseUrlStack = [absUrl, ]
self.treeWrapper = treeWrapper
self.elementWrapperClass = elementWrapperClass
def startElementHandler (self, curNode, startLineNumber, curNs, attributes=[]):
"""Called by the XML parser at creation of an element node.
Input parameter:
curNode: current element node
startLineNumber: first line number of the element tag in XML file
curNs: namespaces visible for this element node
attributes: list of attributes and their values for this element node
                       (same sequence as in the XML file)
"""
elementWrapper = self.elementWrapperClass(curNode, self.treeWrapper, curNs, initAttrSeq=0)
elementWrapper.baseUrl = self.__getBaseUrl(elementWrapper)
elementWrapper.absUrl = self.absUrl
elementWrapper.filePath = self.filePath
elementWrapper.startLineNumber = startLineNumber
elementWrapper.curNs.extend ([("xml", XML_NAMESPACE), ("xmlns", XMLNS_NAMESPACE)])
if attributes != []:
for i in range (0, len(attributes), 2):
elementWrapper.attributeSequence.append(attributes[i])
else:
attrList = elementWrapper.getAttributeDict().keys()
attrList.sort()
elementWrapper.attributeSequence.extend (attrList)
self.baseUrlStack.insert (0, elementWrapper.baseUrl)
def endElementHandler (self, curNode, endLineNumber):
"""Called by the XML parser after creation of an element node.
Input parameter:
curNode: current element node
endLineNumber: last line number of the element tag in XML file
"""
curNode.xmlIfExtElementWrapper.endLineNumber = endLineNumber
self.baseUrlStack.pop (0)
def __getBaseUrl (self, elementWrapper):
"""Retrieve base URL for the given element node.
Input parameter:
elementWrapper: wrapper of current element node
"""
nsNameBaseAttr = NsNameTupleFactory ((XML_NAMESPACE, "base"))
if elementWrapper.hasAttribute(nsNameBaseAttr):
return convertToAbsUrl (elementWrapper.getAttribute(nsNameBaseAttr), self.baseUrlStack[0])
else:
return self.baseUrlStack[0]
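# Illustrative sketch (hypothetical subclass, not part of this module): a
# concrete builder extension would normally derive from the base class and
# chain up to its handlers, e.g.
#
#   class LineNumberExtension (XmlIfBuilderExtensionBase):
#       def startElementHandler (self, curNode, startLineNumber, curNs, attributes=[]):
#           XmlIfBuilderExtensionBase.startElementHandler (self, curNode, startLineNumber, curNs, attributes)
#           # additional per-element bookkeeping goes here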
| lgpl-3.0 |
scascketta/LostNumber | LostNumber/process_msg.py | 1 | 1758 | from twilio.rest import TwilioRestClient
from twilio import TwilioRestException
from redis import StrictRedis
import time
import details
account_sid = details.twilio_account_sid
auth_token = details.twilio_auth_token
client = TwilioRestClient(account_sid, auth_token)
twilio_number = details.twilio_num
r = StrictRedis(host=details.redis_addr, port=details.redis_port)
def start_convo(num, body):
"""
    Starts a convo by selecting a random partner, recording the
    conversation state in redis, and sending the opening msg.
"""
r.srem('available_nums', num)
dest = r.srandmember('available_nums')
state = r.smembers(num + ":state").pop()
body = "(1/10 - " + state + ") " + body
r.incr('total_count')
send_msg(dest, body)
pipe = r.pipeline()
pipe.srem('available_nums', dest)
pipe.sadd('in_conversation', num)
pipe.sadd('in_conversation', dest)
pipe.sadd(num, dest)
pipe.sadd(dest, num)
pipe.hset(num + ":" + dest, 'count', '1')
pipe.hset(dest + ":" + num, 'count', '1')
pipe.execute()
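# Redis key layout implied by the calls above (summary comment, no new
# behaviour): "available_nums" and "in_conversation" are sets of phone numbers,
# "<num>:state" holds a sender's state, "<num>" holds that number's partners,
# and the "<a>:<b>" hashes track a per-conversation message count.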
def add_msg(dest, body):
r.rpush('message_queue', dest + ":" + body)
def process_queue():
"""Pops msgs from the msg queue and dispatches them."""
raw = r.lpop('message_queue')
if raw:
mark = raw.find(':')
dest = raw[:mark]
body = raw[mark + 1:]
send_msg(dest, body)
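# Queue entries are plain strings of the form "<dest_number>:<body>" (see
# add_msg above); only the first ':' separates the destination from the text,
# so message bodies may themselves contain colons.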
def send_msg(dest, body):
# Trim msg body to 160 chars to prevent malicious attempts to send long
# msgs to incur excessive msg fees.
body = body[:160]
try:
client.sms.messages.create(body=body, to=dest, from_=twilio_number)
except TwilioRestException as e:
print repr(e)
if __name__ == '__main__':
while True:
process_queue()
time.sleep(0.5) | mit |
franck-talbart/codelet_tuning_infrastructure | src/cti_hapi/alias.py | 1 | 5818 | #************************************************************************
# Codelet Tuning Infrastructure
# Copyright (C) 2010-2015 Intel Corporation, CEA, GENCI, and UVSQ
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#************************************************************************
# Authors: Franck Talbart, Mathieu Bordet, Nicolas Petit
""" Alias module provides facilities to manage
the aliases
"""
import cti
import database_manager, database
#------------------------------------------------------------------------
def get_data_alias(uid):
""" get an alias from a data UID. It uses the database so
    this function is faster than core's library
Args:
uid: the UID
Returns:
the alias, None if there is no alias
"""
if str(uid) == "":
print "Error: no UID given in get_data_alias."
exit(1)
db = database.Database()
result = database_manager.search({'NAME':["entry_uid"], 'TYPE':"=", 'VAL':uid},
db,
fields=["alias"])
alias = None
for r in result:
alias = r[0]
return alias
#------------------------------------------------------------------------
def get_plugin_alias(uid):
""" get an alias from a plugin UID.
Args:
uid: the UID
Returns:
the alias, None if there is no alias
"""
if str(uid) == "":
print "Error: no UID given in get_plugin_alias."
exit(1)
alias = cti.cti_plugin_alias_plugin_get_key(uid)
return alias
#------------------------------------------------------------------------
def get_data_uid(alias):
""" get an UID from an alias.
Args:
alias: the alias
Returns:
the UID, None if it fails
"""
alias = format_alias(alias)
if alias == "":
print "Error: no alias given in get_data_uid."
exit(1)
db = database.Database()
r = database_manager.search_uids({'NAME':["alias"], 'TYPE':"LIKE", 'VAL':alias},
db)
result = set()
for uid in r:
uid = cti.CTI_UID(str(uid))
result.add(uid)
return result
#------------------------------------------------------------------------
def get_plugin_uid(alias):
""" get an UID from a plugin.
Args:
alias: the alias
Returns:
the UID, None if it fails
"""
alias = format_alias(alias)
uid = cti.cti_plugin_alias_plugin_get_value(alias)
if uid is None:
return None
return cti.CTI_UID(str(uid))
#------------------------------------------------------------------------
def get_repository_uid(alias):
""" get an UID from a plugin.
Args:
alias: the alias
Returns:
the UID, None if it fails
"""
alias = format_alias(alias)
uid = cti.cti_plugin_alias_repository_get_value(alias)
if uid is None:
return None
return cti.CTI_UID(str(uid))
#------------------------------------------------------------------------
def set_data_alias(uid, alias):
""" Create an alias for data.
Args:
uid: CTI_UID
alias: an alias for given uid
Return 1 if it succeeds, 0 otherwise
"""
alias = format_alias(alias)
if get_data_uid(alias):
return 0
if get_plugin_uid(alias) is not None:
return 0
cti.cti_plugin_alias_data_set_value(alias, uid)
db = database.Database()
if database_manager.update("entry_info",
{"alias": alias},
{
'NAME':["entry_uid"],
'TYPE':"=",
'VAL':str(uid)
},
db) is False:
return 0
return 1
#------------------------------------------------------------------------
def set_plugin_alias(uid, alias, dir = None):
""" Create an alias for data.
Args:
uid: CTI_UID
alias: an alias for given uid
"""
alias = format_alias(alias)
if get_data_uid(alias):
return 0
if get_plugin_uid(alias) is not None:
return 0
cti.cti_plugin_alias_plugin_rm_value(uid)
cti.cti_plugin_alias_plugin_set_value(alias, uid, dir)
return 1
#------------------------------------------------------------------------
def set_repository_alias(uid, alias):
""" Create an alias for a repository.
Args:
uid: CTI_UID
alias: an alias for given uid
"""
alias = format_alias(alias)
if alias in ["common", "temp", "all"]:
return 0
if get_repository_uid(alias) is not None:
return 0
cti.cti_plugin_alias_repository_rm_value(uid)
cti.cti_plugin_alias_repository_set_value(alias, uid)
return 1
#------------------------------------------------------------------------
def format_alias(alias):
return str(alias).strip().lower().replace(" ", "_")
#------------------------------------------------------------------------
| gpl-3.0 |
tonybaloney/st2 | st2common/tests/unit/test_policies.py | 6 | 2603 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common.persistence.policy import PolicyType, Policy
from st2common.policies import ResourcePolicyApplicator, get_driver
from st2tests import DbTestCase
from st2tests.fixturesloader import FixturesLoader
__all__ = [
'PolicyTestCase'
]
PACK = 'generic'
TEST_FIXTURES = {
'runners': [
'testrunner1.yaml'
],
'actions': [
'action1.yaml'
],
'policytypes': [
'fake_policy_type_1.yaml',
'fake_policy_type_2.yaml'
],
'policies': [
'policy_1.yaml',
'policy_2.yaml'
]
}
class PolicyTestCase(DbTestCase):
@classmethod
def setUpClass(cls):
super(PolicyTestCase, cls).setUpClass()
loader = FixturesLoader()
loader.save_fixtures_to_db(fixtures_pack=PACK,
fixtures_dict=TEST_FIXTURES)
def test_get_by_ref(self):
policy_db = Policy.get_by_ref('wolfpack.action-1.concurrency')
self.assertIsNotNone(policy_db)
self.assertEqual(policy_db.pack, 'wolfpack')
self.assertEqual(policy_db.name, 'action-1.concurrency')
policy_type_db = PolicyType.get_by_ref(policy_db.policy_type)
self.assertIsNotNone(policy_type_db)
self.assertEqual(policy_type_db.resource_type, 'action')
self.assertEqual(policy_type_db.name, 'concurrency')
def test_get_driver(self):
policy_db = Policy.get_by_ref('wolfpack.action-1.concurrency')
policy = get_driver(policy_db.ref, policy_db.policy_type, **policy_db.parameters)
self.assertIsInstance(policy, ResourcePolicyApplicator)
self.assertEqual(policy._policy_ref, policy_db.ref)
self.assertEqual(policy._policy_type, policy_db.policy_type)
self.assertTrue(hasattr(policy, 'threshold'))
self.assertEqual(policy.threshold, 3)
| apache-2.0 |
saurabhjn76/sympy | sympy/physics/mechanics/lagrange.py | 45 | 18428 | from __future__ import print_function, division
__all__ = ['LagrangesMethod']
from sympy import diff, zeros, Matrix, eye, sympify
from sympy.physics.vector import dynamicsymbols, ReferenceFrame
from sympy.physics.mechanics.functions import (find_dynamicsymbols, msubs,
_f_list_parser)
from sympy.physics.mechanics.linearize import Linearizer
from sympy.utilities import default_sort_key
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import iterable
class LagrangesMethod(object):
"""Lagrange's method object.
This object generates the equations of motion in a two step procedure. The
first step involves the initialization of LagrangesMethod by supplying the
Lagrangian and the generalized coordinates, at the bare minimum. If there
are any constraint equations, they can be supplied as keyword arguments.
The Lagrange multipliers are automatically generated and are equal in
number to the constraint equations. Similarly any non-conservative forces
can be supplied in an iterable (as described below and also shown in the
example) along with a ReferenceFrame. This is also discussed further in the
__init__ method.
Attributes
==========
q, u : Matrix
Matrices of the generalized coordinates and speeds
forcelist : iterable
Iterable of (Point, vector) or (ReferenceFrame, vector) tuples
describing the forces on the system.
mass_matrix : Matrix
The system's mass matrix
forcing : Matrix
The system's forcing vector
mass_matrix_full : Matrix
The "mass matrix" for the qdot's, qdoubledot's, and the
lagrange multipliers (lam)
forcing_full : Matrix
The forcing vector for the qdot's, qdoubledot's and
lagrange multipliers (lam)
Examples
========
This is a simple example for a one degree of freedom translational
spring-mass-damper.
In this example, we first need to do the kinematics.
This involves creating generalized coordinates and their derivatives.
Then we create a point and set its velocity in a frame.
>>> from sympy.physics.mechanics import LagrangesMethod, Lagrangian
>>> from sympy.physics.mechanics import ReferenceFrame, Particle, Point
>>> from sympy.physics.mechanics import dynamicsymbols, kinetic_energy
>>> from sympy import symbols
>>> q = dynamicsymbols('q')
>>> qd = dynamicsymbols('q', 1)
>>> m, k, b = symbols('m k b')
>>> N = ReferenceFrame('N')
>>> P = Point('P')
>>> P.set_vel(N, qd * N.x)
We need to then prepare the information as required by LagrangesMethod to
generate equations of motion.
First we create the Particle, which has a point attached to it.
Following this the lagrangian is created from the kinetic and potential
energies.
Then, an iterable of nonconservative forces/torques must be constructed,
where each item is a (Point, Vector) or (ReferenceFrame, Vector) tuple,
with the Vectors representing the nonconservative forces or torques.
>>> Pa = Particle('Pa', P, m)
>>> Pa.potential_energy = k * q**2 / 2.0
>>> L = Lagrangian(N, Pa)
>>> fl = [(P, -b * qd * N.x)]
Finally we can generate the equations of motion.
First we create the LagrangesMethod object. To do this one must supply
the Lagrangian, and the generalized coordinates. The constraint equations,
the forcelist, and the inertial frame may also be provided, if relevant.
Next we generate Lagrange's equations of motion, such that:
Lagrange's equations of motion = 0.
We have the equations of motion at this point.
>>> l = LagrangesMethod(L, [q], forcelist = fl, frame = N)
>>> print(l.form_lagranges_equations())
Matrix([[b*Derivative(q(t), t) + 1.0*k*q(t) + m*Derivative(q(t), t, t)]])
We can also solve for the states using the 'rhs' method.
>>> print(l.rhs())
Matrix([[Derivative(q(t), t)], [(-b*Derivative(q(t), t) - 1.0*k*q(t))/m]])
Please refer to the docstrings on each method for more details.
"""
def __init__(self, Lagrangian, qs, coneqs=None, forcelist=None,
frame=None, hol_coneqs=None, nonhol_coneqs=None):
"""Supply the following for the initialization of LagrangesMethod
Lagrangian : Sympifyable
qs: array_like
The generalized coordinates
hol_coneqs: array_like, optional
The holonomic constraint equations
nonhol_coneqs: array_like, optional
The nonholonomic constraint equations
forcelist : iterable, optional
Takes an iterable of (Point, Vector) or (ReferenceFrame, Vector)
tuples which represent the force at a point or torque on a frame.
This feature is primarily to account for the nonconservative forces
and/or moments.
frame : ReferenceFrame, optional
Supply the inertial frame. This is used to determine the
generalized forces due to non-conservative forces.
"""
self._L = Matrix([sympify(Lagrangian)])
self.eom = None
self._m_cd = Matrix() # Mass Matrix of differentiated coneqs
self._m_d = Matrix() # Mass Matrix of dynamic equations
self._f_cd = Matrix() # Forcing part of the diff coneqs
self._f_d = Matrix() # Forcing part of the dynamic equations
        self.lam_coeffs = Matrix() # The coefficients of the multipliers
forcelist = forcelist if forcelist else []
if not iterable(forcelist):
raise TypeError('Force pairs must be supplied in an iterable.')
self._forcelist = forcelist
if frame and not isinstance(frame, ReferenceFrame):
raise TypeError('frame must be a valid ReferenceFrame')
self.inertial = frame
self.lam_vec = Matrix()
self._term1 = Matrix()
self._term2 = Matrix()
self._term3 = Matrix()
self._term4 = Matrix()
# Creating the qs, qdots and qdoubledots
if not iterable(qs):
raise TypeError('Generalized coordinates must be an iterable')
self._q = Matrix(qs)
self._qdots = self.q.diff(dynamicsymbols._t)
self._qdoubledots = self._qdots.diff(dynamicsymbols._t)
# Deal with constraint equations
if coneqs:
SymPyDeprecationWarning("The `coneqs` kwarg is deprecated in "
"favor of `hol_coneqs` and `nonhol_coneqs`. Please "
"update your code").warn()
self.coneqs = coneqs
else:
mat_build = lambda x: Matrix(x) if x else Matrix()
hol_coneqs = mat_build(hol_coneqs)
nonhol_coneqs = mat_build(nonhol_coneqs)
self.coneqs = Matrix([hol_coneqs.diff(dynamicsymbols._t),
nonhol_coneqs])
self._hol_coneqs = hol_coneqs
def form_lagranges_equations(self):
"""Method to form Lagrange's equations of motion.
Returns a vector of equations of motion using Lagrange's equations of
the second kind.
"""
qds = self._qdots
qdd_zero = dict((i, 0) for i in self._qdoubledots)
n = len(self.q)
# Internally we represent the EOM as four terms:
# EOM = term1 - term2 - term3 - term4 = 0
# First term
self._term1 = self._L.jacobian(qds)
self._term1 = self._term1.diff(dynamicsymbols._t).T
# Second term
self._term2 = self._L.jacobian(self.q).T
# Third term
if self.coneqs:
coneqs = self.coneqs
m = len(coneqs)
# Creating the multipliers
self.lam_vec = Matrix(dynamicsymbols('lam1:' + str(m + 1)))
self.lam_coeffs = -coneqs.jacobian(qds)
self._term3 = self.lam_coeffs.T * self.lam_vec
            # Extracting the coefficients of the qdds from the diff coneqs
diffconeqs = coneqs.diff(dynamicsymbols._t)
self._m_cd = diffconeqs.jacobian(self._qdoubledots)
# The remaining terms i.e. the 'forcing' terms in diff coneqs
self._f_cd = -diffconeqs.subs(qdd_zero)
else:
self._term3 = zeros(n, 1)
# Fourth term
if self.forcelist:
N = self.inertial
self._term4 = zeros(n, 1)
for i, qd in enumerate(qds):
flist = zip(*_f_list_parser(self.forcelist, N))
self._term4[i] = sum(v.diff(qd, N) & f for (v, f) in flist)
else:
self._term4 = zeros(n, 1)
# Form the dynamic mass and forcing matrices
without_lam = self._term1 - self._term2 - self._term4
self._m_d = without_lam.jacobian(self._qdoubledots)
self._f_d = -without_lam.subs(qdd_zero)
# Form the EOM
self.eom = without_lam - self._term3
return self.eom
@property
def mass_matrix(self):
"""Returns the mass matrix, which is augmented by the Lagrange
multipliers, if necessary.
If the system is described by 'n' generalized coordinates and there are
no constraint equations then an n X n matrix is returned.
If there are 'n' generalized coordinates and 'm' constraint equations
have been supplied during initialization then an n X (n+m) matrix is
returned. The (n + m - 1)th and (n + m)th columns contain the
coefficients of the Lagrange multipliers.
"""
if self.eom is None:
raise ValueError('Need to compute the equations of motion first')
if self.coneqs:
return (self._m_d).row_join(self.lam_coeffs.T)
else:
return self._m_d
@property
def mass_matrix_full(self):
"""Augments the coefficients of qdots to the mass_matrix."""
if self.eom is None:
raise ValueError('Need to compute the equations of motion first')
n = len(self.q)
m = len(self.coneqs)
row1 = eye(n).row_join(zeros(n, n + m))
row2 = zeros(n, n).row_join(self.mass_matrix)
if self.coneqs:
row3 = zeros(m, n).row_join(self._m_cd).row_join(zeros(m, m))
return row1.col_join(row2).col_join(row3)
else:
return row1.col_join(row2)
@property
def forcing(self):
"""Returns the forcing vector from 'lagranges_equations' method."""
if self.eom is None:
raise ValueError('Need to compute the equations of motion first')
return self._f_d
@property
def forcing_full(self):
"""Augments qdots to the forcing vector above."""
if self.eom is None:
raise ValueError('Need to compute the equations of motion first')
if self.coneqs:
return self._qdots.col_join(self.forcing).col_join(self._f_cd)
else:
return self._qdots.col_join(self.forcing)
def to_linearizer(self, q_ind=None, qd_ind=None, q_dep=None, qd_dep=None):
"""Returns an instance of the Linearizer class, initiated from the
data in the LagrangesMethod class. This may be more desirable than using
the linearize class method, as the Linearizer object will allow more
efficient recalculation (i.e. about varying operating points).
Parameters
==========
q_ind, qd_ind : array_like, optional
The independent generalized coordinates and speeds.
q_dep, qd_dep : array_like, optional
The dependent generalized coordinates and speeds.
"""
# Compose vectors
t = dynamicsymbols._t
q = self.q
u = self._qdots
ud = u.diff(t)
# Get vector of lagrange multipliers
lams = self.lam_vec
mat_build = lambda x: Matrix(x) if x else Matrix()
q_i = mat_build(q_ind)
q_d = mat_build(q_dep)
u_i = mat_build(qd_ind)
u_d = mat_build(qd_dep)
# Compose general form equations
f_c = self._hol_coneqs
f_v = self.coneqs
f_a = f_v.diff(t)
f_0 = u
f_1 = -u
f_2 = self._term1
f_3 = -(self._term2 + self._term4)
f_4 = -self._term3
# Check that there are an appropriate number of independent and
# dependent coordinates
if len(q_d) != len(f_c) or len(u_d) != len(f_v):
raise ValueError(("Must supply {:} dependent coordinates, and " +
"{:} dependent speeds").format(len(f_c), len(f_v)))
if set(Matrix([q_i, q_d])) != set(q):
raise ValueError("Must partition q into q_ind and q_dep, with " +
"no extra or missing symbols.")
if set(Matrix([u_i, u_d])) != set(u):
raise ValueError("Must partition qd into qd_ind and qd_dep, " +
"with no extra or missing symbols.")
# Find all other dynamic symbols, forming the forcing vector r.
# Sort r to make it canonical.
insyms = set(Matrix([q, u, ud, lams]))
r = list(find_dynamicsymbols(f_3, insyms))
r.sort(key=default_sort_key)
# Check for any derivatives of variables in r that are also found in r.
for i in r:
if diff(i, dynamicsymbols._t) in r:
raise ValueError('Cannot have derivatives of specified \
quantities when linearizing forcing terms.')
return Linearizer(f_0, f_1, f_2, f_3, f_4, f_c, f_v, f_a, q, u, q_i,
q_d, u_i, u_d, r, lams)
def linearize(self, q_ind=None, qd_ind=None, q_dep=None, qd_dep=None,
**kwargs):
"""Linearize the equations of motion about a symbolic operating point.
If kwarg A_and_B is False (default), returns M, A, B, r for the
linearized form, M*[q', u']^T = A*[q_ind, u_ind]^T + B*r.
If kwarg A_and_B is True, returns A, B, r for the linearized form
dx = A*x + B*r, where x = [q_ind, u_ind]^T. Note that this is
computationally intensive if there are many symbolic parameters. For
this reason, it may be more desirable to use the default A_and_B=False,
returning M, A, and B. Values may then be substituted in to these
matrices, and the state space form found as
A = P.T*M.inv()*A, B = P.T*M.inv()*B, where P = Linearizer.perm_mat.
In both cases, r is found as all dynamicsymbols in the equations of
motion that are not part of q, u, q', or u'. They are sorted in
canonical form.
        The operating points may also be entered using the ``op_point`` kwarg.
        This takes a dictionary of {symbol: value}, or an iterable of such
        dictionaries. The values may be numeric or symbolic. The more values
you can specify beforehand, the faster this computation will run.
For more documentation, please see the ``Linearizer`` class."""
linearizer = self.to_linearizer(q_ind, qd_ind, q_dep, qd_dep)
result = linearizer.linearize(**kwargs)
return result + (linearizer.r,)
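    # Illustrative usage sketch (continuing the spring-mass-damper example in
    # the class docstring; q, qd and l are the names used there):
    #
    #   A, B, r = l.linearize(q_ind=[q], qd_ind=[qd], A_and_B=True,
    #                         op_point={q: 0, qd: 0})
    #
    # giving the state-space form dx = A*x + B*r about that operating point.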
def solve_multipliers(self, op_point=None, sol_type='dict'):
"""Solves for the values of the lagrange multipliers symbolically at
the specified operating point
Parameters
==========
op_point : dict or iterable of dicts, optional
Point at which to solve at. The operating point is specified as
a dictionary or iterable of dictionaries of {symbol: value}. The
value may be numeric or symbolic itself.
sol_type : str, optional
Solution return type. Valid options are:
- 'dict': A dict of {symbol : value} (default)
- 'Matrix': An ordered column matrix of the solution
"""
# Determine number of multipliers
k = len(self.lam_vec)
if k == 0:
raise ValueError("System has no lagrange multipliers to solve for.")
# Compose dict of operating conditions
if isinstance(op_point, dict):
op_point_dict = op_point
elif iterable(op_point):
op_point_dict = {}
for op in op_point:
op_point_dict.update(op)
elif op_point is None:
op_point_dict = {}
else:
raise TypeError("op_point must be either a dictionary or an "
"iterable of dictionaries.")
# Compose the system to be solved
mass_matrix = self.mass_matrix.col_join((-self.lam_coeffs.row_join(
zeros(k, k))))
force_matrix = self.forcing.col_join(self._f_cd)
# Sub in the operating point
mass_matrix = msubs(mass_matrix, op_point_dict)
force_matrix = msubs(force_matrix, op_point_dict)
# Solve for the multipliers
sol_list = mass_matrix.LUsolve(-force_matrix)[-k:]
if sol_type == 'dict':
return dict(zip(self.lam_vec, sol_list))
elif sol_type == 'Matrix':
return Matrix(sol_list)
else:
raise ValueError("Unknown sol_type {:}.".format(sol_type))
def rhs(self, inv_method=None, **kwargs):
"""Returns equations that can be solved numerically
Parameters
==========
inv_method : str
The specific sympy inverse matrix calculation method to use. For a
list of valid methods, see
:meth:`~sympy.matrices.matrices.MatrixBase.inv`
"""
if 'method' in kwargs:
# The method kwarg is deprecated in favor of inv_method.
SymPyDeprecationWarning(feature="method kwarg",
useinstead="inv_method kwarg",
deprecated_since_version="0.7.6").warn()
# For now accept both
inv_method = kwargs['method']
if inv_method is None:
self._rhs = self.mass_matrix_full.LUsolve(self.forcing_full)
else:
self._rhs = (self.mass_matrix_full.inv(inv_method,
try_block_diag=True) * self.forcing_full)
return self._rhs
@property
def q(self):
return self._q
@property
def u(self):
return self._qdots
@property
def forcelist(self):
return self._forcelist
| bsd-3-clause |
qiqjiao/study | jsoncpp/tags/svn-release-0.6.0-rc2/doxybuild.py | 44 | 6791 | """Script to generate doxygen documentation.
"""
import re
import os
import os.path
import sys
import shutil
from devtools import tarball
def find_program(*filenames):
"""find a program in folders path_lst, and sets env[var]
@param filenames: a list of possible names of the program to search for
@return: the full path of the filename if found, or '' if filename could not be found
"""
paths = os.environ.get('PATH', '').split(os.pathsep)
suffixes = ('win32' in sys.platform ) and '.exe .com .bat .cmd' or ''
for filename in filenames:
        for name in [filename+ext for ext in suffixes.split()] + [filename]:
for directory in paths:
full_path = os.path.join(directory, name)
if os.path.isfile(full_path):
return full_path
return ''
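# Editor's note: hedged usage sketch added for illustration; it is not part of
# the original build script. It simply locates the external tools this script
# drives, falling back to an empty string when a program is missing.
def _example_locate_tools():
    doxygen_path = find_program('doxygen')
    dot_path = find_program('dot')
    if not doxygen_path:
        print 'doxygen was not found on the PATH'
    return doxygen_path, dot_path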
def do_subst_in_file(targetfile, sourcefile, dict):
"""Replace all instances of the keys of dict with their values.
For example, if dict is {'%VERSION%': '1.2345', '%BASE%': 'MyProg'},
then all instances of %VERSION% in the file will be replaced with 1.2345 etc.
"""
try:
f = open(sourcefile, 'rb')
contents = f.read()
f.close()
except:
print "Can't read source file %s"%sourcefile
raise
for (k,v) in dict.items():
v = v.replace('\\','\\\\')
contents = re.sub(k, v, contents)
try:
f = open(targetfile, 'wb')
f.write(contents)
f.close()
except:
print "Can't write target file %s"%targetfile
raise
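# Editor's note: hedged sketch of how do_subst_in_file() is driven further
# down in build_doc(); the file names and placeholder keys are illustrative
# only.
def _example_substitution():
    subst_keys = {'%JSONCPP_VERSION%': '0.6.0', '%DOC_TOPDIR%': ''}
    do_subst_in_file('doc/doxyfile', 'doc/doxyfile.in', subst_keys)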
def run_doxygen(doxygen_path, config_file, working_dir, is_silent):
config_file = os.path.abspath( config_file )
doxygen_path = doxygen_path
old_cwd = os.getcwd()
try:
os.chdir( working_dir )
cmd = [doxygen_path, config_file]
print 'Running:', ' '.join( cmd )
try:
import subprocess
except:
if os.system( ' '.join( cmd ) ) != 0:
print 'Documentation generation failed'
return False
else:
if is_silent:
process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
else:
process = subprocess.Popen( cmd )
stdout, _ = process.communicate()
if process.returncode:
print 'Documentation generation failed:'
print stdout
return False
return True
finally:
os.chdir( old_cwd )
def build_doc( options, make_release=False ):
if make_release:
options.make_tarball = True
options.with_dot = True
options.with_html_help = True
options.with_uml_look = True
options.open = False
options.silent = True
version = open('version','rt').read().strip()
output_dir = 'dist/doxygen' # relative to doc/doxyfile location.
if not os.path.isdir( output_dir ):
os.makedirs( output_dir )
top_dir = os.path.abspath( '.' )
html_output_dirname = 'jsoncpp-api-html-' + version
tarball_path = os.path.join( 'dist', html_output_dirname + '.tar.gz' )
warning_log_path = os.path.join( output_dir, '../jsoncpp-doxygen-warning.log' )
html_output_path = os.path.join( output_dir, html_output_dirname )
def yesno( bool ):
return bool and 'YES' or 'NO'
subst_keys = {
'%JSONCPP_VERSION%': version,
'%DOC_TOPDIR%': '',
'%TOPDIR%': top_dir,
'%HTML_OUTPUT%': os.path.join( '..', output_dir, html_output_dirname ),
'%HAVE_DOT%': yesno(options.with_dot),
'%DOT_PATH%': os.path.split(options.dot_path)[0],
'%HTML_HELP%': yesno(options.with_html_help),
'%UML_LOOK%': yesno(options.with_uml_look),
'%WARNING_LOG_PATH%': os.path.join( '..', warning_log_path )
}
if os.path.isdir( output_dir ):
print 'Deleting directory:', output_dir
shutil.rmtree( output_dir )
if not os.path.isdir( output_dir ):
os.makedirs( output_dir )
do_subst_in_file( 'doc/doxyfile', 'doc/doxyfile.in', subst_keys )
ok = run_doxygen( options.doxygen_path, 'doc/doxyfile', 'doc', is_silent=options.silent )
if not options.silent:
print open(warning_log_path, 'rb').read()
index_path = os.path.abspath(os.path.join(subst_keys['%HTML_OUTPUT%'], 'index.html'))
print 'Generated documentation can be found in:'
print index_path
if options.open:
import webbrowser
webbrowser.open( 'file://' + index_path )
if options.make_tarball:
print 'Generating doc tarball to', tarball_path
tarball_sources = [
output_dir,
'README.txt',
'LICENSE',
'NEWS.txt',
'version'
]
tarball_basedir = os.path.join( output_dir, html_output_dirname )
tarball.make_tarball( tarball_path, tarball_sources, tarball_basedir, html_output_dirname )
return tarball_path, html_output_dirname
def main():
usage = """%prog
Generates doxygen documentation in build/doxygen.
    Optionally makes a tarball of the documentation to dist/.
Must be started in the project top directory.
"""
from optparse import OptionParser
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('--with-dot', dest="with_dot", action='store_true', default=False,
help="""Enable usage of DOT to generate collaboration diagram""")
parser.add_option('--dot', dest="dot_path", action='store', default=find_program('dot'),
help="""Path to GraphViz dot tool. Must be full qualified path. [Default: %default]""")
parser.add_option('--doxygen', dest="doxygen_path", action='store', default=find_program('doxygen'),
help="""Path to Doxygen tool. [Default: %default]""")
parser.add_option('--with-html-help', dest="with_html_help", action='store_true', default=False,
help="""Enable generation of Microsoft HTML HELP""")
parser.add_option('--no-uml-look', dest="with_uml_look", action='store_false', default=True,
help="""Generates DOT graph without UML look [Default: False]""")
parser.add_option('--open', dest="open", action='store_true', default=False,
help="""Open the HTML index in the web browser after generation""")
parser.add_option('--tarball', dest="make_tarball", action='store_true', default=False,
help="""Generates a tarball of the documentation in dist/ directory""")
parser.add_option('-s', '--silent', dest="silent", action='store_true', default=False,
help="""Hides doxygen output""")
parser.enable_interspersed_args()
options, args = parser.parse_args()
build_doc( options )
if __name__ == '__main__':
main()
| lgpl-3.0 |
danielvdende/incubator-airflow | airflow/contrib/operators/bigquery_to_gcs.py | 7 | 4491 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.bigquery_hook import BigQueryHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class BigQueryToCloudStorageOperator(BaseOperator):
"""
Transfers a BigQuery table to a Google Cloud Storage bucket.
.. seealso::
For more details about these parameters:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
:param source_project_dataset_table: The dotted
(<project>.|<project>:)<dataset>.<table> BigQuery table to use as the source
data. If <project> is not included, project will be the project
defined in the connection json. (templated)
:type source_project_dataset_table: string
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). (templated) Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: string
:param export_format: File format to export.
    :type export_format: string
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: string
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: boolean
:param bigquery_conn_id: reference to a specific BigQuery hook.
:type bigquery_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: string
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
template_fields = ('source_project_dataset_table',
'destination_cloud_storage_uris', 'labels')
template_ext = ('.sql',)
ui_color = '#e4e6f0'
@apply_defaults
def __init__(self,
source_project_dataset_table,
destination_cloud_storage_uris,
compression='NONE',
export_format='CSV',
field_delimiter=',',
print_header=True,
bigquery_conn_id='bigquery_default',
delegate_to=None,
labels=None,
*args,
**kwargs):
super(BigQueryToCloudStorageOperator, self).__init__(*args, **kwargs)
self.source_project_dataset_table = source_project_dataset_table
self.destination_cloud_storage_uris = destination_cloud_storage_uris
self.compression = compression
self.export_format = export_format
self.field_delimiter = field_delimiter
self.print_header = print_header
self.bigquery_conn_id = bigquery_conn_id
self.delegate_to = delegate_to
self.labels = labels
def execute(self, context):
self.log.info('Executing extract of %s into: %s',
self.source_project_dataset_table,
self.destination_cloud_storage_uris)
hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
delegate_to=self.delegate_to)
conn = hook.get_conn()
cursor = conn.cursor()
cursor.run_extract(
self.source_project_dataset_table,
self.destination_cloud_storage_uris,
self.compression,
self.export_format,
self.field_delimiter,
self.print_header,
self.labels)
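# Editor's note: hedged usage sketch, not part of the upstream operator. The
# DAG id, table, bucket and start date below are placeholders; they only show
# how the templated fields are meant to be filled in.
def _example_dag():
    from datetime import datetime
    from airflow import DAG
    dag = DAG('example_bq_to_gcs', start_date=datetime(2018, 1, 1), schedule_interval=None)
    BigQueryToCloudStorageOperator(
        task_id='export_my_table',
        source_project_dataset_table='my-project.my_dataset.my_table',
        destination_cloud_storage_uris=['gs://my-bucket/exports/my_table-*.csv'],
        export_format='CSV',
        field_delimiter=',',
        print_header=True,
        bigquery_conn_id='bigquery_default',
        dag=dag)
    return dag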
| apache-2.0 |
escapewindow/python-scriptharness | scriptharness/exceptions.py | 2 | 2349 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Scriptharness exceptions.
These exceptions are written with several things in mind:
#. the exceptions should be unicode-capable in python 2.7 (py3 gets that
for free),
#. the exceptions should differentiate between user-facing exceptions and
developer-facing exceptions, and
#. ScriptHarnessFatal should exit the script.
There may be more exceptions in the future, to further differentiate between
errors.
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
from scriptharness.unicode import to_unicode
import six
@six.python_2_unicode_compatible
class ScriptHarnessBaseException(Exception):
"""All scriptharness exceptions should inherit this exception.
However, in most cases you probably want to catch ScriptHarnessException
instead.
"""
def __str__(self):
"""This method will become __unicode__() in py2 via the
@six.python_2_unicode_compatible decorator.
"""
if six.PY3:
string = super(ScriptHarnessBaseException, self).__str__()
else:
string = super(ScriptHarnessBaseException, self).message
string = to_unicode(string, 'utf-8')
return string
class ScriptHarnessException(ScriptHarnessBaseException):
"""There is a problem in how scriptharness is being called.
All developer-facing exceptions should inherit this class.
If you want to catch all developer-facing scriptharness exceptions,
catch ScriptHarnessException.
"""
class ScriptHarnessTimeout(ScriptHarnessException):
"""There was a timeout while running scriptharness.
"""
class ScriptHarnessError(ScriptHarnessBaseException):
"""User-facing exception.
Scriptharness has detected an error in the running process.
Since this exception is not designed to always exit, it's best to
catch these and deal with the error.
"""
class ScriptHarnessFatal(SystemExit, ScriptHarnessBaseException):
"""User-facing exception.
Scriptharness has detected a fatal failure in the running process.
This exception should result in program termination; using try/except may
result in unexpected or dangerous behavior.
"""
def __str__(self):
return ScriptHarnessBaseException.__str__(self)
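# Editor's note: hedged illustration of the intended split between the
# exception classes above; the error messages are made up. ScriptHarnessFatal
# deliberately inherits SystemExit, so it is normally left uncaught.
def _example_error_handling():
    try:
        raise ScriptHarnessError("user-facing: something recoverable went wrong")
    except ScriptHarnessError as error:
        user_message = str(error)
    try:
        raise ScriptHarnessException("developer-facing: scriptharness called incorrectly")
    except ScriptHarnessException as error:
        developer_message = str(error)
    return user_message, developer_message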
| mpl-2.0 |
gqwest-erp/server | openerp/tools/lru.py | 204 | 2946 | # -*- coding: utf-8 -*-
# taken from http://code.activestate.com/recipes/252524-length-limited-o1-lru-cache-implementation/
import threading
from func import synchronized
__all__ = ['LRU']
class LRUNode(object):
__slots__ = ['prev', 'next', 'me']
def __init__(self, prev, me):
self.prev = prev
self.me = me
self.next = None
class LRU(object):
"""
Implementation of a length-limited O(1) LRU queue.
Built for and used by PyPE:
http://pype.sourceforge.net
Copyright 2003 Josiah Carlson.
"""
def __init__(self, count, pairs=[]):
self._lock = threading.RLock()
self.count = max(count, 1)
self.d = {}
self.first = None
self.last = None
for key, value in pairs:
self[key] = value
@synchronized()
def __contains__(self, obj):
return obj in self.d
@synchronized()
def __getitem__(self, obj):
a = self.d[obj].me
self[a[0]] = a[1]
return a[1]
@synchronized()
def __setitem__(self, obj, val):
if obj in self.d:
del self[obj]
nobj = LRUNode(self.last, (obj, val))
if self.first is None:
self.first = nobj
if self.last:
self.last.next = nobj
self.last = nobj
self.d[obj] = nobj
if len(self.d) > self.count:
if self.first == self.last:
self.first = None
self.last = None
return
a = self.first
a.next.prev = None
self.first = a.next
a.next = None
del self.d[a.me[0]]
del a
@synchronized()
def __delitem__(self, obj):
nobj = self.d[obj]
if nobj.prev:
nobj.prev.next = nobj.next
else:
self.first = nobj.next
if nobj.next:
nobj.next.prev = nobj.prev
else:
self.last = nobj.prev
del self.d[obj]
@synchronized()
def __iter__(self):
cur = self.first
while cur is not None:
cur2 = cur.next
yield cur.me[1]
cur = cur2
@synchronized()
def __len__(self):
return len(self.d)
@synchronized()
def iteritems(self):
cur = self.first
while cur is not None:
cur2 = cur.next
yield cur.me
cur = cur2
@synchronized()
def iterkeys(self):
return iter(self.d)
@synchronized()
def itervalues(self):
for i,j in self.iteritems():
yield j
@synchronized()
def keys(self):
return self.d.keys()
@synchronized()
def pop(self,key):
v=self[key]
del self[key]
return v
@synchronized()
def clear(self):
self.d = {}
self.first = None
self.last = None
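# Editor's note: hedged usage sketch for the LRU class above; the keys and the
# size limit are illustrative. Once more than `count` items are stored, the
# least recently used entry is evicted.
def _example_lru_usage():
    cache = LRU(2)
    cache['a'] = 1
    cache['b'] = 2
    cache['c'] = 3            # exceeds count=2, so 'a' is evicted
    return 'a' in cache, cache.keys()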
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
bollu/vispy | examples/demo/gloo/brain.py | 18 | 4553 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vispy: gallery 2
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
3D brain mesh viewer.
"""
from timeit import default_timer
import numpy as np
from vispy import gloo
from vispy import app
from vispy.util.transforms import perspective, translate, rotate
from vispy.io import load_data_file
brain = np.load(load_data_file('brain/brain.npz', force_download='2014-09-04'))
data = brain['vertex_buffer']
faces = brain['index_buffer']
VERT_SHADER = """
#version 120
uniform mat4 u_model;
uniform mat4 u_view;
uniform mat4 u_projection;
uniform vec4 u_color;
attribute vec3 a_position;
attribute vec3 a_normal;
attribute vec4 a_color;
varying vec3 v_position;
varying vec3 v_normal;
varying vec4 v_color;
void main()
{
v_normal = a_normal;
v_position = a_position;
v_color = a_color * u_color;
gl_Position = u_projection * u_view * u_model * vec4(a_position,1.0);
}
"""
FRAG_SHADER = """
#version 120
uniform mat4 u_model;
uniform mat4 u_view;
uniform mat4 u_normal;
uniform vec3 u_light_intensity;
uniform vec3 u_light_position;
varying vec3 v_position;
varying vec3 v_normal;
varying vec4 v_color;
void main()
{
// Calculate normal in world coordinates
vec3 normal = normalize(u_normal * vec4(v_normal,1.0)).xyz;
// Calculate the location of this fragment (pixel) in world coordinates
vec3 position = vec3(u_view*u_model * vec4(v_position, 1));
// Calculate the vector from this pixels surface to the light source
vec3 surfaceToLight = u_light_position - position;
// Calculate the cosine of the angle of incidence (brightness)
float brightness = dot(normal, surfaceToLight) /
(length(surfaceToLight) * length(normal));
brightness = max(min(brightness,1.0),0.0);
// Calculate final color of the pixel, based on:
// 1. The angle of incidence: brightness
// 2. The color/intensities of the light: light.intensities
// 3. The texture and texture coord: texture(tex, fragTexCoord)
// Specular lighting.
vec3 surfaceToCamera = vec3(0.0, 0.0, 1.0) - position;
vec3 K = normalize(normalize(surfaceToLight) + normalize(surfaceToCamera));
float specular = clamp(pow(abs(dot(normal, K)), 40.), 0.0, 1.0);
gl_FragColor = v_color * brightness * vec4(u_light_intensity, 1);
}
"""
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, keys='interactive')
self.size = 800, 600
self.program = gloo.Program(VERT_SHADER, FRAG_SHADER)
self.theta, self.phi = -80, 180
self.translate = 3
self.faces = gloo.IndexBuffer(faces)
self.program.bind(gloo.VertexBuffer(data))
self.program['u_color'] = 1, 1, 1, 1
self.program['u_light_position'] = (1., 1., 1.)
self.program['u_light_intensity'] = (1., 1., 1.)
self.apply_zoom()
gloo.set_state(blend=False, depth_test=True, polygon_offset_fill=True)
self._t0 = default_timer()
self._timer = app.Timer('auto', connect=self.on_timer, start=True)
self.update_matrices()
def update_matrices(self):
self.view = translate((0, 0, -self.translate))
self.model = np.dot(rotate(self.theta, (1, 0, 0)),
rotate(self.phi, (0, 1, 0)))
self.projection = np.eye(4, dtype=np.float32)
self.program['u_model'] = self.model
self.program['u_view'] = self.view
self.program['u_normal'] = np.linalg.inv(np.dot(self.view,
self.model)).T
def on_timer(self, event):
elapsed = default_timer() - self._t0
self.phi = 180 + elapsed * 50.
self.update_matrices()
self.update()
def on_resize(self, event):
self.apply_zoom()
def on_mouse_wheel(self, event):
self.translate += -event.delta[1]/5.
self.translate = max(2, self.translate)
self.update_matrices()
self.update()
def on_draw(self, event):
gloo.clear()
self.program.draw('triangles', indices=self.faces)
def apply_zoom(self):
gloo.set_viewport(0, 0, self.physical_size[0], self.physical_size[1])
self.projection = perspective(45.0, self.size[0] /
float(self.size[1]), 1.0, 20.0)
self.program['u_projection'] = self.projection
if __name__ == '__main__':
c = Canvas()
c.show()
app.run()
| bsd-3-clause |
jhuapl-boss/intern | intern/service/cv/project.py | 1 | 4848 | # Copyright 2020 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from intern.service.cv.service import CloudVolumeService
from intern.service.cv.metadata import MetadataService
from intern.resource.cv.resource import CloudVolumeResource
from cloudvolume import CloudVolume, Vec
class ProjectService(CloudVolumeService):
"""
ProjectService for cloudvolume service.
"""
def __init__(self, protocol, cloudpath):
"""
Constructor.
Args:
protocol (str) : protocol to use. Currently supports 'local', 'gs', and 's3'
cloudpath (str) : in the form of "$BUCKET/../$DATASET/$LAYER"
"""
CloudVolumeService.__init__(self)
self.protocol = protocol
self.cloudpath = cloudpath
def cloudvolume(self, mip, info, parallel, cache, **kwargs):
"""
Creates cloud-volume resource
Args:
mip (int): which mip layer to access
info (dict) : json-encodable dictionary of layer parameters. Necessary for creating a
new cloudvolume instance.
parallel (int: 1, bool): Number of extra processes to launch, 1 means only
use the main process. If parallel is True use the number of CPUs
returned by multiprocessing.cpu_count(). When parallel > 1, shared
memory (Linux) or emulated shared memory via files (other platforms)
is used by the underlying download.
            cache (bool or str) Store downloads and uploads in a cache on disk
and preferentially read from it before redownloading.
- False: no caching will occur.
- True: cache will be located in a standard location.
- non-empty string: cache is located at this file path
kwargs: optional arguments (https://github.com/seung-lab/cloud-volume#cloudvolume-constructor)
Returns:
CloudVolume Object
"""
return CloudVolumeResource(
self.protocol,
self.cloudpath,
mip=mip,
info=info,
parallel=parallel,
cache=cache,
**kwargs
)
def create_new_info(
self,
num_channels,
layer_type,
data_type,
resolution,
volume_size,
voxel_offset=(0, 0, 0),
encoding="raw",
chunk_size=(64, 64, 64),
mesh=None,
skeletons=None,
compressed_segmentation_block_size=(8, 8, 8),
max_mip=0,
factor=(2, 2, 1),
):
"""
Creates the info JSON necessary for a new cloudvolume resource.
Args:
Required:
num_channels: (int) 1 for grayscale, 3 for RGB
layer_type: (str) typically "image" or "segmentation"
data_type: (str) e.g. "uint8", "uint16", "uint32", "float32"
resolution: int (x,y,z), x,y,z voxel dimensions in nanometers
volume_size: int (x,y,z), extent of dataset in cartesian space from voxel_offset
Optional:
voxel_offset: int (x,y,z), beginning of dataset in positive cartesian space
encoding: (str) "raw" for binaries like numpy arrays, "jpeg"
mesh: (str) name of mesh directory, typically "mesh"
skeletons: (str) name of skeletons directory, typically "skeletons"
chunk_size: int (x,y,z), dimensions of each downloadable 3D image chunk in voxels
compressed_segmentation_block_size: (x,y,z) dimensions of each compressed sub-block
(only used when encoding is 'compressed_segmentation')
max_mip: (int), the maximum mip level id.
factor: (tuple), the downsampling factor for each mip level
Returns: dict representing a single mip level that's JSON encodable
"""
return CloudVolume.create_new_info(
num_channels,
layer_type,
data_type,
encoding,
resolution,
voxel_offset,
volume_size,
mesh,
skeletons,
chunk_size,
compressed_segmentation_block_size,
max_mip,
factor,
)
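# Editor's note: hedged wiring sketch, not part of the original service. The
# bucket path, dimensions and resolution are placeholders; a real layer would
# use values matching the underlying dataset.
def _example_create_volume():
    service = ProjectService('gs', 'gs://my-bucket/my_dataset/my_layer')
    info = service.create_new_info(
        num_channels=1,
        layer_type='image',
        data_type='uint8',
        resolution=[4, 4, 40],
        volume_size=[1024, 1024, 64],
        chunk_size=(64, 64, 64))
    return service.cloudvolume(mip=0, info=info, parallel=1, cache=False)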
| apache-2.0 |
sonaht/ansible | lib/ansible/module_utils/aws/core.py | 50 | 5402 | #
# Copyright 2017 Michael De La Rue | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""This module adds shared support for generic Amazon AWS modules
**This code is not yet ready for use in user modules. As of 2017**
**and through to 2018, the interface is likely to change**
**aggressively as the exact correct interface for ansible AWS modules**
**is identified. In particular, until this notice goes away or is**
**changed, methods may disappear from the interface. Please don't**
**publish modules using this except directly to the main Ansible**
**development repository.**
In order to use this module, include it as part of a custom
module as shown below.
from ansible.module_utils.aws import AnsibleAWSModule
module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean
mutually_exclusive=list1, required_together=list2)
The 'AnsibleAWSModule' module provides similar, but more restricted,
interfaces to the normal Ansible module. It also includes the
additional methods for connecting to AWS using the standard module arguments
try:
m.aws_connect(resource='lambda') # - get an AWS connection.
except Exception:
m.fail_json_aws(Exception, msg="trying to connect") # - take an exception and make a decent failure
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, ec2_argument_spec
import traceback
# We will also export HAS_BOTO3 so end user modules can use it.
__all__ = ('AnsibleAWSModule', 'HAS_BOTO3',)
class AnsibleAWSModule(object):
"""An ansible module class for AWS modules
AnsibleAWSModule provides an a class for building modules which
connect to Amazon Web Services. The interface is currently more
restricted than the basic module class with the aim that later the
basic module class can be reduced. If you find that any key
feature is missing please contact the author/Ansible AWS team
(available on #ansible-aws on IRC) to request the additional
features needed.
"""
default_settings = {
"default_args": True,
"check_boto3": True,
"auto_retry": True,
"module_class": AnsibleModule
}
def __init__(self, **kwargs):
local_settings = {}
for key in AnsibleAWSModule.default_settings:
try:
local_settings[key] = kwargs.pop(key)
except KeyError:
local_settings[key] = AnsibleAWSModule.default_settings[key]
self.settings = local_settings
if local_settings["default_args"]:
# ec2_argument_spec contains the region so we use that; there's a patch coming which
# will add it to aws_argument_spec so if that's accepted then later we should change
# over
argument_spec_full = ec2_argument_spec()
try:
argument_spec_full.update(kwargs["argument_spec"])
except (TypeError, NameError):
pass
kwargs["argument_spec"] = argument_spec_full
self._module = AnsibleAWSModule.default_settings["module_class"](**kwargs)
if local_settings["check_boto3"] and not HAS_BOTO3:
self._module.fail_json(
msg='Python modules "botocore" or "boto3" are missing, please install both')
self.check_mode = self._module.check_mode
@property
def params(self):
return self._module.params
def exit_json(self, *args, **kwargs):
return self._module.exit_json(*args, **kwargs)
def fail_json(self, *args, **kwargs):
return self._module.fail_json(*args, **kwargs)
def fail_json_aws(self, exception, msg=None):
"""call fail_json with processed exception
function for converting exceptions thrown by AWS SDK modules,
botocore, boto3 and boto, into nice error messages.
"""
last_traceback = traceback.format_exc()
# to_native is trusted to handle exceptions that str() could
# convert to text.
try:
except_msg = to_native(exception.message)
except AttributeError:
except_msg = to_native(exception)
if msg is not None:
message = '{0}: {1}'.format(msg, except_msg)
else:
message = except_msg
try:
response = exception.response
except AttributeError:
response = None
if response is None:
self._module.fail_json(msg=message, exception=last_traceback)
else:
self._module.fail_json(msg=message, exception=last_traceback,
**camel_dict_to_snake_dict(response))
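# Editor's note: hedged sketch mirroring the module docstring above. A real
# AnsibleAWSModule is launched by Ansible (its arguments arrive via stdin), so
# this function is illustrative rather than a standalone script; the argument
# spec and messages are placeholders.
def _example_fail_json_aws():
    module = AnsibleAWSModule(argument_spec=dict(name=dict(required=True, type='str')))
    try:
        raise Exception("simulated AWS API failure")
    except Exception as e:
        module.fail_json_aws(e, msg="trying to describe the resource")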
| gpl-3.0 |
aurofable/medhack-server | venv/lib/python2.7/encodings/uu_codec.py | 383 | 3738 | """ Python 'uu_codec' Codec - UU content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg ([email protected]). Some details were
adapted from uu.py which was written by Lance Ellinghouse and
modified by Jack Jansen and Fredrik Lundh.
"""
import codecs, binascii
### Codec APIs
def uu_encode(input,errors='strict',filename='<data>',mode=0666):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
from cStringIO import StringIO
from binascii import b2a_uu
    # using str() because of cStringIO's undesired Unicode behavior.
infile = StringIO(str(input))
outfile = StringIO()
read = infile.read
write = outfile.write
# Encode
write('begin %o %s\n' % (mode & 0777, filename))
chunk = read(45)
while chunk:
write(b2a_uu(chunk))
chunk = read(45)
write(' \nend\n')
return (outfile.getvalue(), len(input))
def uu_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
Note: filename and file mode information in the input data is
ignored.
"""
assert errors == 'strict'
from cStringIO import StringIO
from binascii import a2b_uu
infile = StringIO(str(input))
outfile = StringIO()
readline = infile.readline
write = outfile.write
# Find start of encoded data
while 1:
s = readline()
if not s:
raise ValueError, 'Missing "begin" line in input data'
if s[:5] == 'begin':
break
# Decode
while 1:
s = readline()
if not s or \
s == 'end\n':
break
try:
data = a2b_uu(s)
except binascii.Error, v:
# Workaround for broken uuencoders by /Fredrik Lundh
nbytes = (((ord(s[0])-32) & 63) * 4 + 5) / 3
data = a2b_uu(s[:nbytes])
#sys.stderr.write("Warning: %s\n" % str(v))
write(data)
if not s:
raise ValueError, 'Truncated input data'
return (outfile.getvalue(), len(input))
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return uu_encode(input,errors)
def decode(self,input,errors='strict'):
return uu_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return uu_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return uu_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='uu',
encode=uu_encode,
decode=uu_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
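# Editor's note: hedged round-trip sketch; as noted in the module docstring,
# this codec works on byte strings rather than Unicode.
def _example_uu_roundtrip():
    encoded, consumed = uu_encode('hello world')
    decoded, _ = uu_decode(encoded)
    assert decoded == 'hello world'
    return encoded, consumed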
| mit |
mpetyx/palmdrop | venv/lib/python2.7/site-packages/django/contrib/admindocs/utils.py | 216 | 3801 | "Misc. utility functions/classes for admin documentation generator."
import re
from email.parser import HeaderParser
from email.errors import HeaderParseError
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from django.utils.encoding import force_bytes
try:
import docutils.core
import docutils.nodes
import docutils.parsers.rst.roles
except ImportError:
docutils_is_available = False
else:
docutils_is_available = True
def trim_docstring(docstring):
"""
Uniformly trims leading/trailing whitespace from docstrings.
Based on http://www.python.org/peps/pep-0257.html#handling-docstring-indentation
"""
if not docstring or not docstring.strip():
return ''
# Convert tabs to spaces and split into lines
lines = docstring.expandtabs().splitlines()
indent = min([len(line) - len(line.lstrip()) for line in lines if line.lstrip()])
trimmed = [lines[0].lstrip()] + [line[indent:].rstrip() for line in lines[1:]]
return "\n".join(trimmed).strip()
def parse_docstring(docstring):
"""
Parse out the parts of a docstring. Returns (title, body, metadata).
"""
docstring = trim_docstring(docstring)
parts = re.split(r'\n{2,}', docstring)
title = parts[0]
if len(parts) == 1:
body = ''
metadata = {}
else:
parser = HeaderParser()
try:
metadata = parser.parsestr(parts[-1])
except HeaderParseError:
metadata = {}
body = "\n\n".join(parts[1:])
else:
metadata = dict(metadata.items())
if metadata:
body = "\n\n".join(parts[1:-1])
else:
body = "\n\n".join(parts[1:])
return title, body, metadata
def parse_rst(text, default_reference_context, thing_being_parsed=None):
"""
Convert the string from reST to an XHTML fragment.
"""
overrides = {
'doctitle_xform' : True,
        'initial_header_level' : 3,
"default_reference_context" : default_reference_context,
"link_base" : reverse('django-admindocs-docroot').rstrip('/')
}
if thing_being_parsed:
thing_being_parsed = force_bytes("<%s>" % thing_being_parsed)
parts = docutils.core.publish_parts(text, source_path=thing_being_parsed,
destination_path=None, writer_name='html',
settings_overrides=overrides)
return mark_safe(parts['fragment'])
#
# reST roles
#
ROLES = {
'model' : '%s/models/%s/',
'view' : '%s/views/%s/',
'template' : '%s/templates/%s/',
'filter' : '%s/filters/#%s',
'tag' : '%s/tags/#%s',
}
def create_reference_role(rolename, urlbase):
def _role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None: options = {}
if content is None: content = []
node = docutils.nodes.reference(rawtext, text, refuri=(urlbase % (inliner.document.settings.link_base, text.lower())), **options)
return [node], []
docutils.parsers.rst.roles.register_canonical_role(rolename, _role)
def default_reference_role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None: options = {}
if content is None: content = []
context = inliner.document.settings.default_reference_context
node = docutils.nodes.reference(rawtext, text, refuri=(ROLES[context] % (inliner.document.settings.link_base, text.lower())), **options)
return [node], []
if docutils_is_available:
docutils.parsers.rst.roles.register_canonical_role('cmsreference', default_reference_role)
docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE = 'cmsreference'
for name, urlbase in ROLES.items():
create_reference_role(name, urlbase)
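# Editor's note: hedged illustration of the docstring helpers above; the
# sample docstring text is made up.
def _example_parse_docstring():
    sample = "Do something useful.\n\nLonger description of the behaviour.\n\ndeprecated: yes"
    title, body, metadata = parse_docstring(sample)
    # title -> "Do something useful.", body -> the middle paragraph,
    # metadata -> {'deprecated': 'yes'}
    return title, body, metadata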
| apache-2.0 |
mcgachey/edx-platform | setup.py | 11 | 3320 | """
Setup script for the Open edX package.
"""
from setuptools import setup
setup(
name="Open edX",
version="0.5",
install_requires=["setuptools"],
requires=[],
# NOTE: These are not the names we should be installing. This tree should
# be reorganized to be a more conventional Python tree.
packages=[
"openedx.core.djangoapps.course_groups",
"openedx.core.djangoapps.credit",
"openedx.core.djangoapps.user_api",
"lms",
"cms",
],
entry_points={
"openedx.course_tab": [
"ccx = lms.djangoapps.ccx.plugins:CcxCourseTab",
"courseware = lms.djangoapps.courseware.tabs:CoursewareTab",
"course_info = lms.djangoapps.courseware.tabs:CourseInfoTab",
"discussion = lms.djangoapps.django_comment_client.forum.views:DiscussionTab",
"edxnotes = lms.djangoapps.edxnotes.plugins:EdxNotesTab",
"external_discussion = lms.djangoapps.courseware.tabs:ExternalDiscussionCourseTab",
"external_link = lms.djangoapps.courseware.tabs:ExternalLinkCourseTab",
"html_textbooks = lms.djangoapps.courseware.tabs:HtmlTextbookTabs",
"instructor = lms.djangoapps.instructor.views.instructor_dashboard:InstructorDashboardTab",
"notes = lms.djangoapps.notes.views:NotesTab",
"pdf_textbooks = lms.djangoapps.courseware.tabs:PDFTextbookTabs",
"progress = lms.djangoapps.courseware.tabs:ProgressTab",
"static_tab = xmodule.tabs:StaticTab",
"syllabus = lms.djangoapps.courseware.tabs:SyllabusTab",
"teams = lms.djangoapps.teams.plugins:TeamsTab",
"textbooks = lms.djangoapps.courseware.tabs:TextbookTabs",
"wiki = lms.djangoapps.course_wiki.tab:WikiTab",
# ORA 1 tabs (deprecated)
"peer_grading = lms.djangoapps.open_ended_grading.views:PeerGradingTab",
"staff_grading = lms.djangoapps.open_ended_grading.views:StaffGradingTab",
"open_ended = lms.djangoapps.open_ended_grading.views:OpenEndedGradingTab",
],
"openedx.user_partition_scheme": [
"random = openedx.core.djangoapps.user_api.partition_schemes:RandomUserPartitionScheme",
"cohort = openedx.core.djangoapps.course_groups.partition_scheme:CohortPartitionScheme",
"verification = openedx.core.djangoapps.credit.partition_schemes:VerificationPartitionScheme",
],
"openedx.block_structure_transformer": [
"library_content = lms.djangoapps.course_blocks.transformers.library_content:ContentLibraryTransformer",
"split_test = lms.djangoapps.course_blocks.transformers.split_test:SplitTestTransformer",
"start_date = lms.djangoapps.course_blocks.transformers.start_date:StartDateTransformer",
"user_partitions = lms.djangoapps.course_blocks.transformers.user_partitions:UserPartitionTransformer",
"visibility = lms.djangoapps.course_blocks.transformers.visibility:VisibilityTransformer",
"course_blocks_api = lms.djangoapps.course_api.blocks.transformers.blocks_api:BlocksAPITransformer",
"proctored_exam = lms.djangoapps.course_api.blocks.transformers.proctored_exam:ProctoredExamTransformer",
],
}
)
| agpl-3.0 |
roberthawdon/toonbot | subprocesses/post-queue.py | 1 | 6346 | #!/usr/bin/env python
# Toon Bot - Poster Bot Subprocess
#
# _____ U ___ u U ___ u _ _ ____ U ___ u _____
# |_ " _| \/"_ \/ \/"_ \/ | \ |"| U | __")u \/"_ \/|_ " _|
# | | | | | | | | | |<| \| |> \| _ \/ | | | | | |
# /| |\.-,_| |_| |.-,_| |_| |U| |\ |u | |_) |.-,_| |_| | /| |\
# u |_|U \_)-\___/ \_)-\___/ |_| \_| |____/ \_)-\___/ u |_|U
# _// \\_ \\ \\ || \\,-. _|| \\_ \\ _// \\_
# (__) (__) (__) (__) (_") (_/ (__) (__) (__) (__) (__)
#
# Providing 5 minute breaks since 2016
#
# By Robert Hawdon - https://robertianhawdon.me.uk/
import sys
from argparse import ArgumentParser
import MySQLdb
import random
import time
import urllib
import urllib2
import yaml
import json
import os
import os.path
from tendo import singleton
me = singleton.SingleInstance()
script_dirpath = os.path.dirname(os.path.join(os.getcwd(), __file__))
class PosterBot(object):
def __init__(self, config):
# set the config object
self.config = config
# set slack token
self.token = config.get('SLACK_TOKEN')
# set mysql details
self.mysqlserver = config.get('MYSQL_SERVER')
self.mysqluser = config.get('MYSQL_USER')
self.mysqlpass = config.get('MYSQL_PASS')
self.mysqldb = config.get('MYSQL_DB')
# self.postcolor = config.get('POST_COLOR')
# self.posttextcolor = config.get('POST_TEXT_COLOR')
self.process_queue()
def process_queue(self):
#try:
conn = MySQLdb.Connection(self.mysqlserver, self.mysqluser, self.mysqlpass, self.mysqldb)
curs = conn.cursor()
conn.set_character_set('utf8')
curs.execute('SET NAMES utf8;')
curs.execute('SET CHARACTER SET utf8;')
curs.execute('SET character_set_connection=utf8;')
cmd = "SELECT value FROM tbl_system WHERE name = 'postcolor'"
curs.execute(cmd)
result = curs.fetchall()
for color in result:
defaultpostcolor = color[0]
cmd = "SELECT value FROM tbl_system WHERE name = 'posttextcolor'"
curs.execute(cmd)
result = curs.fetchall()
for color in result:
defaultposttextcolor = color[0]
cmd = "SELECT Q.ID, Q.slackuser, Q.displayname, Q.comichash, Q.flags, U.dmid FROM tbl_queue Q LEFT JOIN tbl_users U ON U.slackuser = Q.slackuser WHERE Q.sent = 0"
curs.execute(cmd)
result = curs.fetchall()
for items in result:
id = items[0]
slackuser = items[1]
displayname = items[2]
comichash = items[3]
flags = items[4]
dmid = items[5]
cmd = "SELECT ID FROM tbl_users WHERE slackuser = %s"
curs.execute(cmd, [slackuser])
result = curs.fetchall()
for users in result:
userid = users[0]
cmd = "SELECT name, value FROM tbl_preferences WHERE userID = %s"
curs.execute(cmd, [userid])
result = curs.fetchall()
prefname = []
prefvalue = []
for preferences in result:
prefname.append(preferences[0])
prefvalue.append(preferences[1])
if 'postcolor' in prefname:
postcolor = prefvalue[prefname.index("postcolor")]
else:
postcolor = defaultpostcolor
if 'posttextcolor' in prefname:
posttextcolor = prefvalue[prefname.index("posttextcolor")]
else:
posttextcolor = defaultposttextcolor
cmd = "SELECT image, pageurl, title, text FROM tbl_comic_data WHERE comichash = %s"
curs.execute(cmd, ([comichash]))
result2 = curs.fetchall()
for comic in result2:
image = comic[0]
pageurl = comic[1]
title = comic[2]
if title:
utitle = title.decode("utf-8")
title = utitle.encode("ascii", "ignore")
text = comic[3]
if text:
utext = text.decode("utf-8")
text = utext.encode("ascii", "ignore")
if title is None:
title = displayname
if text is not None:
body = [{"title": title,"title_link": pageurl,"author_name": displayname,"image_url": image,"color": "#" + postcolor}, {"text": text, "color": "#" + posttextcolor}]
else:
body = [{"title": title,"title_link": pageurl,"author_name": displayname,"image_url": image,"color": "#" + postcolor}]
data = body
#print json.dumps(data)
attachment = urllib.quote(str(json.dumps(data)))
url = "https://slack.com/api/chat.postMessage?token=" + self.token + "&channel=" + dmid + "&attachments=" + attachment + "&as_user=true"
req = urllib2.Request(url)
response = urllib2.urlopen(req)
# print response.read()
            image = None
            pageurl = None
            title = None
            text = None
jsonres = json.load(response)
if jsonres["ok"] is True:
cmd = "UPDATE tbl_queue SET sent = 1 WHERE ID = %s"
curs.execute(cmd, ([id]))
conn.commit()
else:
errormessage = jsonres["error"]
cmd = "UPDATE tbl_queue SET flags = 1, errormessage = %s WHERE ID = %s"
curs.execute(cmd, ([errormessage], [id]))
cmd = "INSERT INTO tbl_queue_errors (errormessage, queueID) VALUES (%s, %s)"
curs.execute(cmd, ([errormessage], [id]))
conn.commit()
time.sleep(1)
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'-c',
'--config',
help='Full path to config file.',
metavar='path'
)
return parser.parse_args()
# load args with config path
args = parse_args()
config = yaml.load(open(args.config or script_dirpath + '/../toonbot.conf', 'r'))
PosterBot(config)
| gpl-3.0 |
jitka/weblate | weblate/trans/tests/__init__.py | 2 | 1838 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2016 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import tempfile
import shutil
from weblate import appsettings
class OverrideSettings(object):
"""
makes a context manager also act as decorator
"""
TEMP_DIR = 0x12346578
def __init__(self, **values):
self._values = values
self._backup = {}
self._tempdir = None
def __enter__(self):
for name, value in self._values.items():
self._backup[name] = getattr(appsettings, name)
if value == self.TEMP_DIR:
self._tempdir = tempfile.mkdtemp()
setattr(appsettings, name, self._tempdir)
else:
setattr(appsettings, name, value)
return self
def __exit__(self, *args, **kwds):
for name in self._values.keys():
setattr(appsettings, name, self._backup[name])
if self._tempdir is not None:
shutil.rmtree(self._tempdir)
def __call__(self, func):
def wrapper(*args, **kwds):
with self:
return func(*args, **kwds)
return wrapper
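# Editor's note: hedged usage sketch; OFFLOAD_INDEXING and DATA_DIR are only
# examples of weblate appsettings names and must exist on the appsettings
# module for the override to work.
def _example_override_usage():
    with OverrideSettings(OFFLOAD_INDEXING=True):
        pass  # code run here sees the temporary value
    @OverrideSettings(DATA_DIR=OverrideSettings.TEMP_DIR)
    def run_with_temp_dir():
        pass  # appsettings.DATA_DIR points at a temporary directory here
    return run_with_temp_dir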
| gpl-3.0 |
ChrisTruncer/EyeWitness | Python/modules/reporting.py | 1 | 17673 | import os
import sys
import urllib.parse
try:
from fuzzywuzzy import fuzz
except ImportError:
print('[*] fuzzywuzzy not found.')
print('[*] Please run the script in the setup directory!')
sys.exit()
def process_group(
data, group, toc, toc_table, page_num, section,
sectionid, html):
"""Retreives a group from the full data, and creates toc stuff
Args:
data (List): Full set of data containing all hosts
group (String): String representing group to process
toc (String): HTML for Table of Contents
toc_table (String): HTML for Table in ToC
page_num (int): Page number we're on in the report
section (String): Display name of the group
sectionid (String): Unique ID for ToC navigation
html (String): HTML for current page of report
Returns:
List: Elements for category sorted and grouped
String: HTML representing ToC
String: HTML representing ToC Table
String: HTML representing current report page
"""
group_data = sorted([x for x in data if x.category == group], key=lambda k: str(k.page_title))
grouped_elements = []
if len(group_data) == 0:
return grouped_elements, toc, toc_table, html
if page_num == 0:
toc += ("<li><a href=\"report.html#{0}\">{1} (Page 1)</a></li>").format(
sectionid, section)
else:
toc += ("<li><a href=\"report_page{0}.html#{1}\">{2} (Page {0})</a></li>").format(
str(page_num+1), sectionid, section)
html += "<h2 id=\"{0}\">{1}</h2>".format(sectionid, section)
unknowns = [x for x in group_data if x.page_title == 'Unknown']
group_data = [x for x in group_data if x.page_title != 'Unknown']
while len(group_data) > 0:
test_element = group_data.pop(0)
temp = [x for x in group_data if fuzz.token_sort_ratio(
test_element.page_title, x.page_title) >= 70]
temp.append(test_element)
temp = sorted(temp, key=lambda k: k.page_title)
grouped_elements.extend(temp)
group_data = [x for x in group_data if fuzz.token_sort_ratio(
test_element.page_title, x.page_title) < 70]
grouped_elements.extend(unknowns)
toc_table += ("<tr><td>{0}</td><td>{1}</td>").format(section,
str(len(grouped_elements)))
return grouped_elements, toc, toc_table, html
def sort_data_and_write(cli_parsed, data):
"""Writes out reports for HTTP objects
Args:
cli_parsed (TYPE): CLI Options
data (TYPE): Full set of data
"""
# We'll be using this number for our table of contents
total_results = len(data)
categories = [('highval', 'High Value Targets', 'highval'),
('dirlist', 'Directory Listings', 'dirlist'),
(None, 'Uncategorized', 'uncat'),
('cms', 'Content Management System (CMS)', 'cms'),
('idrac', 'IDRAC/ILo/Management Interfaces', 'idrac'),
('nas', 'Network Attached Storage (NAS)', 'nas'),
('construction', 'Under Construction', 'construction'),
('netdev', 'Network Devices', 'netdev'),
('voip', 'Voice/Video over IP (VoIP)', 'voip'),
('unauth', '401/403 Unauthorized', 'unauth'),
('notfound', '404 Not Found', 'notfound'),
('crap', 'Splash Pages', 'crap'),
('printer', 'Printers', 'printer'),
('successfulLogin', 'Successful Logins', 'successfulLogin'),
('identifiedLogin', 'Identified Logins', 'identifiedLogin'),
('infrastructure', 'Infrastructure', 'infrastructure'),
('redirector', 'Redirecting Pages', 'redirector'),
('badhost', 'Invalid Hostname', 'badhost'),
('inerror', 'Internal Error', 'inerror'),
('badreq', 'Bad Request', 'badreq'),
('serviceunavailable', 'Service Unavailable', 'serviceunavailable'),
]
if total_results == 0:
return
# Initialize stuff we need
pages = []
toc = create_report_toc_head(cli_parsed.date, cli_parsed.time)
toc_table = "<table class=\"table\">"
web_index_head = create_web_index_head(cli_parsed.date, cli_parsed.time)
table_head = create_table_head()
counter = 1
csv_request_data = "Protocol,Port,Domain,Request Status,Screenshot Path, Source Path"
# Generate and write json log of requests
for json_request in data:
url = urllib.parse.urlparse(json_request._remote_system)
# Determine protocol
csv_request_data += "\n" + url.scheme + ","
if url.port is not None:
csv_request_data += str(url.port) + ","
elif url.scheme == 'http':
csv_request_data += "80,"
elif url.scheme == 'https':
csv_request_data += "443,"
try:
csv_request_data += url.hostname + ","
except TypeError:
print("Error when accessing a target's hostname (it's not existent)")
print("Possible bad url (improperly formatted) in the URL list.")
print("Fix your list and re-try. Killing EyeWitness....")
sys.exit(1)
if json_request._error_state == None:
csv_request_data += "Successful,"
else:
csv_request_data += json_request._error_state + ","
csv_request_data += json_request._screenshot_path + ","
csv_request_data += json_request._source_path
with open(os.path.join(cli_parsed.d, 'Requests.csv'), 'a') as f:
f.write(csv_request_data)
# Pre-filter error entries
def key_lambda(k):
if k.error_state is None:
k.error_state = str(k.error_state)
if k.page_title is None:
k.page_title = str(k.page_title)
return (k.error_state, k.page_title)
errors = sorted([x for x in data if (x is not None) and (x.error_state is not None)],
key=key_lambda)
data[:] = [x for x in data if x.error_state is None]
data = sorted(data, key=lambda k: str(k.page_title))
html = u""
# Loop over our categories and populate HTML
for cat in categories:
grouped, toc, toc_table, html = process_group(
data, cat[0], toc, toc_table, len(pages), cat[1], cat[2], html)
if len(grouped) > 0:
html += table_head
pcount = 0
for obj in grouped:
pcount += 1
html += obj.create_table_html()
if (counter % cli_parsed.results == 0) or (counter == (total_results) -1):
html = (web_index_head + "EW_REPLACEME" + html +
"</table><br>")
pages.append(html)
html = u""
if pcount < len(grouped):
html += table_head
counter += 1
if len(grouped) > 0 and counter - 1 % cli_parsed.results != 0:
html += "</table><br>"
# Add our errors here (at the very very end)
if len(errors) > 0:
html += '<h2>Errors</h2>'
html += table_head
for obj in errors:
html += obj.create_table_html()
if (counter % cli_parsed.results == 0) or (counter == (total_results)):
html = (web_index_head + "EW_REPLACEME" + html +
"</table><br>")
pages.append(html)
html = u"" + table_head
counter += 1
# Close out any stuff thats hanging
toc += "</ul>"
toc_table += "<tr><td>Errors</td><td>{0}</td></tr>".format(
str(len(errors)))
toc_table += "<tr><th>Total</th><td>{0}</td></tr>".format(total_results)
toc_table += "</table>"
if (html != u"") and (counter - total_results != 0):
html = (web_index_head + "EW_REPLACEME" + html +
"</table><br>")
pages.append(html)
toc = "<center>{0}<br><br>{1}<br><br></center>".format(toc, toc_table)
if len(pages) == 1:
with open(os.path.join(cli_parsed.d, 'report.html'), 'a') as f:
f.write(toc)
f.write(pages[0].replace('EW_REPLACEME', ''))
f.write("</body>\n</html>")
else:
num_pages = len(pages) + 1
bottom_text = "\n<center><br>"
bottom_text += ("<a href=\"report.html\"> Page 1</a>")
skip_last_dummy = False
# Generate our header/footer data here
for i in range(2, num_pages):
badd_page = "</center>EW_REPLACEME<table border=\"1\">\n <tr>\n <th>Web Request Info</th>\n <th>Web Screenshot</th>\n </tr></table><br>"
if badd_page in pages[i-1]:
skip_last_dummy = True
pass
else:
bottom_text += ("<a href=\"report_page{0}.html\"> Page {0}</a>").format(str(i))
bottom_text += "</center>\n"
top_text = bottom_text
# Generate our next/previous page buttons
if skip_last_dummy:
amount = len(pages) - 1
else:
amount = len(pages)
for i in range(0, amount):
headfoot = "<h3>Page {0}</h3>".format(str(i+1))
headfoot += "<center>"
if i == 0:
headfoot += ("<a href=\"report_page2.html\" id=\"next\"> Next Page "
"</a></center>")
elif i == amount - 1:
if i == 1:
headfoot += ("<a href=\"report.html\" id=\"previous\"> Previous Page "
"</a></center>")
else:
headfoot += ("<a href=\"report_page{0}.html\" id=\"previous\"> Previous Page "
"</a></center>").format(str(i))
elif i == 1:
headfoot += ("<a href=\"report.html\" id=\"previous\">Previous Page</a> "
"<a href=\"report_page{0}.html\" id=\"next\"> Next Page"
"</a></center>").format(str(i+2))
else:
headfoot += ("<a href=\"report_page{0}.html\" id=\"previous\">Previous Page</a>"
" <a href=\"report_page{1}.html\" id=\"next\"> Next Page"
"</a></center>").format(str(i), str(i+2))
# Finalize our pages by replacing placeholder stuff and writing out
# the headers/footers
pages[i] = pages[i].replace(
'EW_REPLACEME', headfoot + top_text) + bottom_text + '<br>' + headfoot + '</body></html>'
# Write out our report to disk!
if len(pages) == 0:
return
with open(os.path.join(cli_parsed.d, 'report.html'), 'a') as f:
f.write(toc)
f.write(pages[0])
write_out = len(pages)
for i in range(2, write_out + 1):
bad_page = "<table border=\"1\">\n <tr>\n <th>Web Request Info</th>\n <th>Web Screenshot</th>\n </tr></table><br>\n<center><br><a "
badd_page2 = "</center>EW_REPLACEME<table border=\"1\">\n <tr>\n <th>Web Request Info</th>\n <th>Web Screenshot</th>\n </tr></table><br>"
if (bad_page in pages[i-1]) or (badd_page2 in pages[i-1]):
pass
else:
with open(os.path.join(cli_parsed.d, 'report_page{0}.html'.format(str(i))), 'w') as f:
f.write(pages[i - 1])
def create_web_index_head(date, time):
"""Creates the header for a http report
Args:
date (String): Date of report start
time (String): Time of report start
Returns:
String: HTTP Report Start html
"""
return ("""<html>
<head>
<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css\" type=\"text/css\"/>
<title>EyeWitness Report</title>
<script src="jquery-1.11.3.min.js"></script>
<script type="text/javascript">
function toggleUA(id, url){{
idi = "." + id;
$(idi).toggle();
change = document.getElementById(id);
if (change.innerHTML.indexOf("expand") > -1){{
change.innerHTML = "Click to collapse User Agents for " + url;
}}else{{
change.innerHTML = "Click to expand User Agents for " + url;
}}
}}
document.onkeydown = function(event){{
event = event || window.event;
switch (event.keyCode){{
case 37:
leftArrow();
break;
case 39:
rightArrow();
break;
}}
}};
function leftArrow(){{
$('#previous')[0].click();
}};
function rightArrow(){{
$('#next')[0].click();
}};
</script>
</head>
<body>
<center>
<center>Report Generated on {0} at {1}</center>""").format(date, time)
def search_index_head():
return ("""<html>
<head>
<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css\" type=\"text/css\"/>
<title>EyeWitness Report</title>
<script src="jquery-1.11.3.min.js"></script>
<script type="text/javascript">
function toggleUA(id, url){{
idi = "." + id;
$(idi).toggle();
change = document.getElementById(id);
if (change.innerHTML.indexOf("expand") > -1){{
change.innerHTML = "Click to collapse User Agents for " + url;
}}else{{
change.innerHTML = "Click to expand User Agents for " + url;
}}
}}
</script>
</head>
<body>
<center>
""")
def create_table_head():
return ("""<table border=\"1\">
<tr>
<th>Web Request Info</th>
<th>Web Screenshot</th>
</tr>""")
def create_report_toc_head(date, time):
return ("""<html>
<head>
<title>EyeWitness Report Table of Contents</title>
</head>
<h2>Table of Contents</h2>""")
def search_report(cli_parsed, data, search_term):
pages = []
web_index_head = search_index_head()
table_head = create_table_head()
counter = 1
data[:] = [x for x in data if x.error_state is None]
data = sorted(data, key=lambda k: k.page_title)
html = u""
# Add our errors here (at the very very end)
html += '<h2>Results for {0}</h2>'.format(search_term)
html += table_head
for obj in data:
html += obj.create_table_html()
if counter % cli_parsed.results == 0:
html = (web_index_head + "EW_REPLACEME" + html +
"</table><br>")
pages.append(html)
html = u"" + table_head
counter += 1
if html != u"":
html = (web_index_head + html + "</table><br>")
pages.append(html)
if len(pages) == 1:
with open(os.path.join(cli_parsed.d, 'search.html'), 'a') as f:
f.write(pages[0].replace('EW_REPLACEME', ''))
f.write("</body>\n</html>")
else:
num_pages = len(pages) + 1
bottom_text = "\n<center><br>"
bottom_text += ("<a href=\"search.html\"> Page 1</a>")
# Generate our header/footer data here
for i in range(2, num_pages):
bottom_text += ("<a href=\"search_page{0}.html\"> Page {0}</a>").format(
str(i))
bottom_text += "</center>\n"
top_text = bottom_text
# Generate our next/previous page buttons
for i in range(0, len(pages)):
headfoot = "<center>"
if i == 0:
headfoot += ("<a href=\"search_page2.html\"> Next Page "
"</a></center>")
elif i == len(pages) - 1:
if i == 1:
headfoot += ("<a href=\"search.html\"> Previous Page "
"</a></center>")
else:
headfoot += ("<a href=\"search_page{0}.html\"> Previous Page "
"</a></center>").format(str(i))
elif i == 1:
headfoot += ("<a href=\"search.html\">Previous Page</a> "
"<a href=\"search_page{0}.html\"> Next Page"
"</a></center>").format(str(i+2))
else:
headfoot += ("<a href=\"search_page{0}.html\">Previous Page</a>"
" <a href=\"search_page{1}.html\"> Next Page"
"</a></center>").format(str(i), str(i+2))
# Finalize our pages by replacing placeholder stuff and writing out
# the headers/footers
pages[i] = pages[i].replace(
'EW_REPLACEME', headfoot + top_text) + bottom_text + '<br>' + headfoot + '</body></html>'
# Write out our report to disk!
if len(pages) == 0:
return
with open(os.path.join(cli_parsed.d, 'search.html'), 'a') as f:
try:
f.write(pages[0])
except UnicodeEncodeError:
f.write(pages[0].encode('utf-8'))
for i in range(2, len(pages) + 1):
with open(os.path.join(cli_parsed.d, 'search_page{0}.html'.format(str(i))), 'w') as f:
try:
f.write(pages[i - 1])
except UnicodeEncodeError:
f.write(pages[i - 1].encode('utf-8'))
| gpl-3.0 |
RKrahl/pytest-dependency | pytest_dependency.py | 1 | 6492 | """$DOC"""
__version__ = "$VERSION"
import logging
import pytest
logger = logging.getLogger(__name__)
_automark = False
_ignore_unknown = False
def _get_bool(value):
"""Evaluate string representation of a boolean value.
"""
if value:
if value.lower() in ["0", "no", "n", "false", "f", "off"]:
return False
elif value.lower() in ["1", "yes", "y", "true", "t", "on"]:
return True
else:
raise ValueError("Invalid truth value '%s'" % value)
else:
return False
class DependencyItemStatus(object):
"""Status of a test item in a dependency manager.
"""
Phases = ('setup', 'call', 'teardown')
def __init__(self):
self.results = { w:None for w in self.Phases }
def __str__(self):
l = ["%s: %s" % (w, self.results[w]) for w in self.Phases]
return "Status(%s)" % ", ".join(l)
def addResult(self, rep):
self.results[rep.when] = rep.outcome
def isSuccess(self):
return list(self.results.values()) == ['passed', 'passed', 'passed']
class DependencyManager(object):
"""Dependency manager, stores the results of tests.
"""
ScopeCls = {
'session': pytest.Session,
'package': pytest.Package,
'module': pytest.Module,
'class': pytest.Class,
}
@classmethod
def getManager(cls, item, scope):
"""Get the DependencyManager object from the node at scope level.
Create it, if not yet present.
"""
node = item.getparent(cls.ScopeCls[scope])
if not node:
return None
if not hasattr(node, 'dependencyManager'):
node.dependencyManager = cls(scope)
return node.dependencyManager
def __init__(self, scope):
self.results = {}
self.scope = scope
def addResult(self, item, name, rep):
if not name:
# Old versions of pytest used to add an extra "::()" to
# the node ids of class methods to denote the class
# instance. This has been removed in pytest 4.0.0.
nodeid = item.nodeid.replace("::()::", "::")
if self.scope == 'session' or self.scope == 'package':
name = nodeid
elif self.scope == 'module':
name = nodeid.split("::", 1)[1]
elif self.scope == 'class':
name = nodeid.split("::", 2)[2]
else:
raise RuntimeError("Internal error: invalid scope '%s'"
% self.scope)
status = self.results.setdefault(name, DependencyItemStatus())
logger.debug("register %s %s %s in %s scope",
rep.when, name, rep.outcome, self.scope)
status.addResult(rep)
def checkDepend(self, depends, item):
logger.debug("check dependencies of %s in %s scope ...",
item.name, self.scope)
for i in depends:
if i in self.results:
if self.results[i].isSuccess():
logger.debug("... %s succeeded", i)
continue
else:
logger.debug("... %s has not succeeded", i)
else:
logger.debug("... %s is unknown", i)
if _ignore_unknown:
continue
logger.info("skip %s because it depends on %s", item.name, i)
pytest.skip("%s depends on %s" % (item.name, i))
def depends(request, other, scope='module'):
"""Add dependency on other test.
Call pytest.skip() unless a successful outcome of all of the tests in
other has been registered previously. This has the same effect as
the `depends` keyword argument to the :func:`pytest.mark.dependency`
marker. In contrast to the marker, this function may be called at
runtime during a test.
:param request: the value of the `request` pytest fixture related
to the current test.
:param other: dependencies, a list of names of tests that this
test depends on. The names of the dependencies must be
adapted to the scope.
:type other: iterable of :class:`str`
:param scope: the scope to search for the dependencies. Must be
either `'session'`, `'package'`, `'module'`, or `'class'`.
:type scope: :class:`str`
.. versionadded:: 0.2
.. versionchanged:: 0.5.0
the scope parameter has been added.
"""
item = request.node
manager = DependencyManager.getManager(item, scope=scope)
manager.checkDepend(other, item)
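# Editor-added usage sketch (a hypothetical test module; the test names are
# illustrative only).  It shows depends() being called at runtime instead of
# passing the `depends` keyword argument to the marker:
#
#     import pytest
#     from pytest_dependency import depends
#
#     @pytest.mark.dependency()
#     def test_a():
#         assert True
#
#     def test_b(request):
#         depends(request, ["test_a"])  # skips test_b unless test_a passed
#         assert True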
def pytest_addoption(parser):
parser.addini("automark_dependency",
"Add the dependency marker to all tests automatically",
default=False)
parser.addoption("--ignore-unknown-dependency",
action="store_true", default=False,
help="ignore dependencies whose outcome is not known")
def pytest_configure(config):
global _automark, _ignore_unknown
_automark = _get_bool(config.getini("automark_dependency"))
_ignore_unknown = config.getoption("--ignore-unknown-dependency")
config.addinivalue_line("markers",
"dependency(name=None, depends=[]): "
"mark a test to be used as a dependency for "
"other tests or to depend on other tests.")
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""Store the test outcome if this item is marked "dependency".
"""
outcome = yield
marker = item.get_closest_marker("dependency")
if marker is not None or _automark:
rep = outcome.get_result()
name = marker.kwargs.get('name') if marker is not None else None
for scope in DependencyManager.ScopeCls:
manager = DependencyManager.getManager(item, scope=scope)
if (manager):
manager.addResult(item, name, rep)
def pytest_runtest_setup(item):
"""Check dependencies if this item is marked "dependency".
Skip if any of the dependencies has not been run successfully.
"""
marker = item.get_closest_marker("dependency")
if marker is not None:
depends = marker.kwargs.get('depends')
if depends:
scope = marker.kwargs.get('scope', 'module')
manager = DependencyManager.getManager(item, scope=scope)
manager.checkDepend(depends, item)
| apache-2.0 |
trondhindenes/ansible | lib/ansible/plugins/action/aruba.py | 21 | 3677 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network.aruba.aruba import aruba_provider_spec
from ansible.module_utils.network.common.utils import load_provider
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
if self._play_context.connection != 'local':
return dict(
failed=True,
msg='invalid connection specified, expected connection=local, '
'got %s' % self._play_context.connection
)
provider = load_provider(aruba_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'aruba'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
connection.set_options(direct={'persistent_command_timeout': command_timeout})
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
# make sure we are in the right cli context which should be
# enable mode and not config module
conn = Connection(socket_path)
out = conn.get_prompt()
if to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('exit')
task_vars['ansible_socket'] = socket_path
if self._play_context.become_method == 'enable':
self._play_context.become = False
self._play_context.become_method = None
result = super(ActionModule, self).run(task_vars=task_vars)
return result
| gpl-3.0 |
yfu/tools | circ/find_circ/merge_bed.py | 4 | 1854 | #!/usr/bin/env python
import sys,os
from collections import defaultdict
def read_to_hash(fname,ds=0,de=0,flank=0,cover=False):
#print "loading",fname
pos = {}
for line in file(fname):
if line.startswith("#"):
continue
line = line.strip()
chrom,start,end,name,score,sense = line.split('\t')[:6]
start,end = int(start)+ds,int(end)+de
#print (chrom,start,end,sense)
pos[(chrom,start,end,sense)] = line
if flank:
for x in xrange(flank):
pos[(chrom,start-x,end,sense)] = line
pos[(chrom,start+x,end,sense)] = line
pos[(chrom,start,end-x,sense)] = line
pos[(chrom,start,end+x,sense)] = line
#if cover:
#for x in xrange
return pos
N = defaultdict(int)
anna = read_to_hash(sys.argv[1],flank=0)
N['unique_input1'] = len(anna)
#print len(anna.keys())
marv = read_to_hash(sys.argv[2])
N['unique_input2'] = len(marv)
#print len(marv.keys())
for circ,line in marv.items():
if circ in anna:
if len(sys.argv) > 3:
print "%s\t%s" % (anna[circ].split('\t')[3],line.split('\t')[3])
else:
print anna[circ]
#print "M",line
N['overlap'] += 1
del anna[circ]
else:
N['input2_not_in_input1'] += 1
#print len(anna.keys())
for k,l in anna.items():
#if "HEK" in l:
print "MISSING\t%s" % l
N['input1_not_in_input2'] += 1
for k in sorted(N.keys()):
sys.stderr.write("%s\t%d\n" % (k,N[k]))
found = N['overlap']
detected = N['unique_input2']
total = N['unique_input1']
fp = N['input2_not_in_input1']
#print "#sensitivity %d/%d = %.2f %%" % (found,total,float(found)/total*100)
#print "#FDR %d/%d = %.2f %%" % (fp,detected,float(fp)/detected*100) | gpl-3.0 |
incnone/necrobot | necrobot/race/publicrace/raceroom.py | 1 | 11742 | # A necrobot "casual" race room.
import asyncio
import datetime
import discord
from necrobot.botbase.botchannel import BotChannel
from necrobot.botbase.necrobot import Necrobot
from necrobot.config import Config
from necrobot.race import cmd_race
from necrobot.race import raceinfo
from necrobot.race.publicrace import cmd_publicrace
from necrobot.race.race import Race, RaceEvent
from necrobot.test import cmd_test
from necrobot.util import server
from necrobot.util import strutil
from necrobot.race import racedb
class RaceRoom(BotChannel):
def __init__(self, race_discord_channel, race_info):
BotChannel.__init__(self)
        self._channel = race_discord_channel  # The discord channel in which this race is taking place
self._race_info = race_info # The type of races to be run in this room
self._current_race = None # The current race
self._previous_race = None # The previous race
self._race_number = 0 # The number of races we've done
self._mention_on_new_race = [] # A list of users that should be @mentioned when a rematch is created
self._mentioned_users = [] # A list of users that were @mentioned when this race was created
self._nopoke = False # When True, the .poke command fails
self.channel_commands = [
cmd_race.Enter(self),
cmd_race.Unenter(self),
cmd_race.Ready(self),
cmd_race.Unready(self),
cmd_race.Done(self),
cmd_race.Undone(self),
cmd_race.Forfeit(self),
cmd_race.Unforfeit(self),
cmd_race.Comment(self),
cmd_race.Death(self),
cmd_race.Igt(self),
cmd_race.Time(self),
cmd_race.ForceForfeit(self),
cmd_race.ForceForfeitAll(self),
cmd_race.Pause(self),
cmd_race.Unpause(self),
cmd_race.Reseed(self),
cmd_race.ChangeRules(self),
cmd_publicrace.Rematch(self),
cmd_publicrace.Kick(self),
cmd_publicrace.Notify(self),
cmd_publicrace.Unnotify(self),
cmd_publicrace.Missing(self),
cmd_publicrace.Shame(self),
cmd_publicrace.Poke(self),
cmd_publicrace.ForceCancel(self),
cmd_publicrace.ForceClose(self),
cmd_test.TestRace(self),
]
# Properties ------------------------------
@property
def channel(self):
return self._channel
# The currently active race. Is not None.
@property
def current_race(self):
return self._current_race
# A string to add to the race details (used for private races; empty in base class)
@property
def format_rider(self):
return ''
# The most recent race to begin, or None if no such
@property
def last_begun_race(self):
if not self._current_race.before_race:
return self._current_race
else:
return self._previous_race
@property
def mentioned_users(self):
return self._mentioned_users
@property
def race_info(self):
return self._race_info
@property
def results_channel(self):
return server.find_channel(channel_name=Config.RACE_RESULTS_CHANNEL_NAME)
# Returns the string to go in the topic for the leaderboard
@property
def leaderboard(self):
new_leaderboard = '``` \n' + strutil.tickless(self._leaderboard_header(self.current_race)) \
+ self.current_race.status_str + '\n'
new_leaderboard += 'Entrants:\n'
new_leaderboard += strutil.tickless(self.current_race.leaderboard_text)
new_leaderboard += '```'
return new_leaderboard
# Returns 'header' text for the race, giving info about the rules etc.
def _leaderboard_header(self, race: Race):
room_rider = self.format_rider
if room_rider:
room_rider = ' ' + room_rider
seed_str = race.race_info.seed_str
if seed_str:
seed_str = '\n' + seed_str
return race.race_info.format_str + room_rider + seed_str + '\n'
# Methods -------------------------------------------------------------
# Notifies the given user on a rematch
def notify(self, user: discord.Member):
if user not in self._mention_on_new_race:
self._mention_on_new_race.append(user)
# Removes notifications for the given user on rematch
def dont_notify(self, user: discord.Member):
self._mention_on_new_race = [u for u in self._mention_on_new_race if u != user]
def refresh(self, channel: discord.TextChannel):
self._channel = channel
# Coroutine methods ---------------------------------------------------
# Set up the leaderboard etc. Should be called after creation; code not put into __init__ b/c coroutine
async def initialize(self):
asyncio.ensure_future(self._monitor_for_cleanup())
await self._make_new_race()
await self.write('Enter the race with `.enter`, and type `.ready` when ready. '
'Finish the race with `.done` or `.forfeit`. Use `.help` for a command list.')
# Write text to the raceroom. Return a Message for the text written
async def write(self, text: str):
await self._channel.send(text)
# Processes a race event
async def process(self, race_event: RaceEvent):
if race_event.event == RaceEvent.EventType.RACE_END:
await asyncio.sleep(1) # Waiting for a short time feels good UI-wise
await self.write(
'The race is over. Results will be recorded in {} seconds. Until then, you may comment with '
'`.comment` or add an in-game-time with `.igt`.'.format(
self.current_race.race_config.finalize_time_sec))
elif race_event.event == RaceEvent.EventType.RACE_FINALIZE:
await racedb.record_race(race_event.race)
if race_event.race.race_info.post_results:
await self.post_result(race_event.race)
elif race_event.event == RaceEvent.EventType.RACE_CANCEL:
await self.write('The race has been canceled.')
await self.update()
elif race_event.event == RaceEvent.EventType.RACER_ENTER:
self.notify(race_event.racer_member)
elif race_event.event == RaceEvent.EventType.RACER_UNENTER:
self.dont_notify(race_event.racer_member)
else:
await self.update()
# Updates the leaderboard
async def update(self):
pass
# await self._channel.edit(topic=self.leaderboard)
# Post the race result to the race necrobot
async def post_result(self, race: Race):
await self.results_channel.send(
'Race begun at {0}:\n```\n{1}{2}\n```'.format(
race.start_datetime.strftime("%d %B %Y, UTC %H:%M"),
strutil.tickless(self._leaderboard_header(race)),
strutil.tickless(race.leaderboard_text)
)
)
# Commands ------------------------------------------------------------
async def set_post_result(self, do_post: bool):
self._race_info.post_results = do_post
if self.current_race.before_race:
self.current_race.race_info = raceinfo.RaceInfo.copy(self._race_info)
if do_post:
await self.write('Races in this channel will have their results posted to the results channel.')
else:
await self.write('Races in this channel will not have their results posted to the results channel.')
# Change the RaceInfo for this room
async def change_race_info(self, command_args: list):
new_race_info = raceinfo.parse_args_modify(command_args, raceinfo.RaceInfo.copy(self._race_info))
if new_race_info:
self._race_info = new_race_info
if self.current_race.before_race:
self.current_race.race_info = raceinfo.RaceInfo.copy(self._race_info)
await self.write('Changed rules for the next race.')
await self.update()
# Close the channel.
async def close(self):
Necrobot().unregister_bot_channel(self._channel)
await self._channel.delete()
# Makes a rematch of this race if the current race is finished
async def make_rematch(self):
if self._current_race.complete:
await self._make_new_race()
# Alerts unready users
async def poke(self):
if self._nopoke or not self._current_race or not self._current_race.before_race:
return
ready_racers = []
unready_racers = []
for racer in self._current_race.racers:
if racer.is_ready:
ready_racers.append(racer)
else:
unready_racers.append(racer)
num_unready = len(unready_racers)
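        # Editor-added note: poke only when exactly one racer is unready, or when
        # ready racers outnumber unready racers by at least three to one.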
quorum = (num_unready == 1) or (3*num_unready <= len(ready_racers))
if ready_racers and quorum:
self._nopoke = True
alert_string = ''
for racer in unready_racers:
alert_string += racer.member.mention + ', '
await self.write('Poking {0}.'.format(alert_string[:-2]))
asyncio.ensure_future(self._run_nopoke_delay())
# Private -----------------------------------------------------------------
    # Makes a new Race (and stores the previous one in self._previous_race)
async def _make_new_race(self):
# Make the race
self._race_number += 1
self._previous_race = self._current_race
self._current_race = Race(self, self.race_info)
await self._current_race.initialize()
await self.update()
# Send @mention message
self._mentioned_users = []
mention_text = ''
for user in self._mention_on_new_race:
mention_text += user.mention + ' '
self._mentioned_users.append(user)
self._mention_on_new_race = []
if self.race_info.seeded:
await self._channel.send(
'{0}\nRace number {1} is open for entry. Seed: {2}.'.format(
mention_text, self._race_number, self.current_race.race_info.seed))
else:
await self._channel.send(
'{0}\nRace number {1} is open for entry.'.format(mention_text, self._race_number))
# Checks to see whether the room should be cleaned.
async def _monitor_for_cleanup(self):
while True:
await asyncio.sleep(30) # Wait between check times
# No race object
if self._current_race is None:
await self.close()
return
# Pre-race
elif self._current_race.before_race:
if not self._current_race.any_entrants:
if self._current_race.passed_no_entrants_cleanup_time:
await self.close()
return
elif self._current_race.passed_no_entrants_warning_time:
await self.write('Warning: Race has had zero entrants for some time and will be closed soon.')
# Post-race
elif self._current_race.complete:
async for msg in self._channel.history(limit=1):
if (datetime.datetime.utcnow() - msg.created_at) > Config.CLEANUP_TIME:
await self.close()
return
# Implements a delay before pokes can happen again
async def _run_nopoke_delay(self):
await asyncio.sleep(Config.RACE_POKE_DELAY)
self._nopoke = False
| mit |
morphis/home-assistant | homeassistant/util/temperature.py | 18 | 1188 | """Temperature util functions."""
from homeassistant.const import (
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
UNIT_NOT_RECOGNIZED_TEMPLATE,
TEMPERATURE
)
def fahrenheit_to_celsius(fahrenheit: float) -> float:
"""Convert a Fahrenheit temperature to Celsius."""
return (fahrenheit - 32.0) / 1.8
def celsius_to_fahrenheit(celsius: float) -> float:
"""Convert a Celsius temperature to Fahrenheit."""
return celsius * 1.8 + 32.0
def convert(temperature: float, from_unit: str, to_unit: str) -> float:
"""Convert a temperature from one unit to another."""
if from_unit not in (TEMP_CELSIUS, TEMP_FAHRENHEIT):
raise ValueError(UNIT_NOT_RECOGNIZED_TEMPLATE.format(from_unit,
TEMPERATURE))
if to_unit not in (TEMP_CELSIUS, TEMP_FAHRENHEIT):
raise ValueError(UNIT_NOT_RECOGNIZED_TEMPLATE.format(to_unit,
TEMPERATURE))
if from_unit == to_unit:
return temperature
elif from_unit == TEMP_CELSIUS:
return celsius_to_fahrenheit(temperature)
else:
return fahrenheit_to_celsius(temperature)
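# A minimal usage sketch added for illustration (not part of Home Assistant);
# it runs only when this module is executed directly.
if __name__ == '__main__':
    # 100 °C converts to 212 °F and back again.
    fahrenheit = convert(100.0, TEMP_CELSIUS, TEMP_FAHRENHEIT)
    celsius = convert(fahrenheit, TEMP_FAHRENHEIT, TEMP_CELSIUS)
    print(fahrenheit, celsius)  # expected output: 212.0 100.0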
| apache-2.0 |
hoverinc/three.js | utils/converters/fbx/convert_to_threejs.py | 16 | 76649 | # @author zfedoran / http://github.com/zfedoran
import os
import sys
import math
import operator
import re
import json
import types
import shutil
# #####################################################
# Globals
# #####################################################
option_triangulate = True
option_textures = True
option_prefix = True
option_geometry = False
option_default_camera = False
option_default_light = False
option_pretty_print = False
converter = None
inputFolder = ""
outputFolder = ""
# #####################################################
# Pretty Printing Hacks
# #####################################################
# Force an array to be printed fully on a single line
class NoIndent(object):
def __init__(self, value, separator = ','):
self.separator = separator
self.value = value
def encode(self):
if not self.value:
return None
return '[ %s ]' % self.separator.join(str(f) for f in self.value)
# Force an array into chunks rather than printing each element on a new line
class ChunkedIndent(object):
def __init__(self, value, chunk_size = 15, force_rounding = False):
self.value = value
self.size = chunk_size
self.force_rounding = force_rounding
def encode(self):
        # Turn the flat array into an array of arrays where each subarray is of
        # length chunk_size. Then join the values in each chunk with ', ',
        # rounding them if requested, and finally prepend '{CHUNK}' so that the
        # strings can be found with a regex later
if not self.value:
return None
if self.force_rounding:
return ['{CHUNK}%s' % ', '.join(str(round(f, 6)) for f in self.value[i:i+self.size]) for i in range(0, len(self.value), self.size)]
else:
return ['{CHUNK}%s' % ', '.join(str(f) for f in self.value[i:i+self.size]) for i in range(0, len(self.value), self.size)]
# This custom encoder looks for instances of NoIndent or ChunkedIndent.
# When it finds one, it uses that object's encode() method instead of the
# default JSON encoding.
class CustomEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, NoIndent) or isinstance(obj, ChunkedIndent):
return obj.encode()
else:
return json.JSONEncoder.default(self, obj)
def executeRegexHacks(output_string):
# turn strings of arrays into arrays (remove the double quotes)
output_string = re.sub(':\s*\"(\[.*\])\"', r': \1', output_string)
output_string = re.sub('(\n\s*)\"(\[.*\])\"', r'\1\2', output_string)
output_string = re.sub('(\n\s*)\"{CHUNK}(.*)\"', r'\1\2', output_string)
# replace '0metadata' with metadata
output_string = re.sub('0metadata', r'metadata', output_string)
# replace 'zchildren' with children
output_string = re.sub('zchildren', r'children', output_string)
# add an extra newline after '"children": {'
output_string = re.sub('(children.*{\s*\n)', r'\1\n', output_string)
# add an extra newline after '},'
output_string = re.sub('},\s*\n', r'},\n\n', output_string)
# add an extra newline after '\n\s*],'
output_string = re.sub('(\n\s*)],\s*\n', r'\1],\n\n', output_string)
return output_string
# #####################################################
# Object Serializers
# #####################################################
# FbxVector2 is not JSON serializable
def serializeVector2(v, round_vector = False):
# JSON does not support NaN or Inf
if math.isnan(v[0]) or math.isinf(v[0]):
v[0] = 0
if math.isnan(v[1]) or math.isinf(v[1]):
v[1] = 0
if round_vector or option_pretty_print:
v = (round(v[0], 5), round(v[1], 5))
if option_pretty_print:
return NoIndent([v[0], v[1]], ', ')
else:
return [v[0], v[1]]
# FbxVector3 is not JSON serializable
def serializeVector3(v, round_vector = False):
# JSON does not support NaN or Inf
if math.isnan(v[0]) or math.isinf(v[0]):
v[0] = 0
if math.isnan(v[1]) or math.isinf(v[1]):
v[1] = 0
if math.isnan(v[2]) or math.isinf(v[2]):
v[2] = 0
if round_vector or option_pretty_print:
v = (round(v[0], 5), round(v[1], 5), round(v[2], 5))
if option_pretty_print:
return NoIndent([v[0], v[1], v[2]], ', ')
else:
return [v[0], v[1], v[2]]
# FbxVector4 is not JSON serializable
def serializeVector4(v, round_vector = False):
# JSON does not support NaN or Inf
if math.isnan(v[0]) or math.isinf(v[0]):
v[0] = 0
if math.isnan(v[1]) or math.isinf(v[1]):
v[1] = 0
if math.isnan(v[2]) or math.isinf(v[2]):
v[2] = 0
if math.isnan(v[3]) or math.isinf(v[3]):
v[3] = 0
if round_vector or option_pretty_print:
v = (round(v[0], 5), round(v[1], 5), round(v[2], 5), round(v[3], 5))
if option_pretty_print:
return NoIndent([v[0], v[1], v[2], v[3]], ', ')
else:
return [v[0], v[1], v[2], v[3]]
# #####################################################
# Helpers
# #####################################################
def getRadians(v):
return ((v[0]*math.pi)/180, (v[1]*math.pi)/180, (v[2]*math.pi)/180)
def getHex(c):
color = (int(c[0]*255) << 16) + (int(c[1]*255) << 8) + int(c[2]*255)
return int(color)
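# Example (editor-added): getHex((1.0, 0.0, 0.0)) -> 16711680 (0xFF0000).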
def setBit(value, position, on):
if on:
mask = 1 << position
return (value | mask)
else:
mask = ~(1 << position)
return (value & mask)
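# Examples (editor-added): setBit(0, 1, True) -> 2, setBit(2, 1, False) -> 0.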
def generate_uvs(uv_layers):
layers = []
for uvs in uv_layers:
tmp = []
for uv in uvs:
tmp.append(uv[0])
tmp.append(uv[1])
if option_pretty_print:
layer = ChunkedIndent(tmp)
else:
layer = tmp
layers.append(layer)
return layers
# #####################################################
# Object Name Helpers
# #####################################################
def hasUniqueName(o, class_id):
scene = o.GetScene()
object_name = o.GetName()
object_id = o.GetUniqueID()
object_count = scene.GetSrcObjectCount(class_id)
for i in range(object_count):
other = scene.GetSrcObject(class_id, i)
other_id = other.GetUniqueID()
other_name = other.GetName()
if other_id == object_id:
continue
if other_name == object_name:
return False
return True
def getObjectName(o, force_prefix = False):
if not o:
return ""
object_name = o.GetName()
object_id = o.GetUniqueID()
if not force_prefix:
force_prefix = not hasUniqueName(o, FbxNode.ClassId)
prefix = ""
if option_prefix or force_prefix:
prefix = "Object_%s_" % object_id
return prefix + object_name
def getMaterialName(o, force_prefix = False):
object_name = o.GetName()
object_id = o.GetUniqueID()
if not force_prefix:
force_prefix = not hasUniqueName(o, FbxSurfaceMaterial.ClassId)
prefix = ""
if option_prefix or force_prefix:
prefix = "Material_%s_" % object_id
return prefix + object_name
def getTextureName(t, force_prefix = False):
if type(t) is FbxFileTexture:
texture_file = t.GetFileName()
texture_id = os.path.splitext(os.path.basename(texture_file))[0]
else:
texture_id = t.GetName()
if texture_id == "_empty_":
texture_id = ""
prefix = ""
if option_prefix or force_prefix:
prefix = "Texture_%s_" % t.GetUniqueID()
if len(texture_id) == 0:
prefix = prefix[0:len(prefix)-1]
return prefix + texture_id
def getMtlTextureName(texture_name, texture_id, force_prefix = False):
texture_name = os.path.splitext(texture_name)[0]
prefix = ""
if option_prefix or force_prefix:
prefix = "Texture_%s_" % texture_id
return prefix + texture_name
def getPrefixedName(o, prefix):
return (prefix + '_%s_') % o.GetUniqueID() + o.GetName()
# #####################################################
# Triangulation
# #####################################################
def triangulate_node_hierarchy(node):
node_attribute = node.GetNodeAttribute();
if node_attribute:
if node_attribute.GetAttributeType() == FbxNodeAttribute.eMesh or \
node_attribute.GetAttributeType() == FbxNodeAttribute.eNurbs or \
node_attribute.GetAttributeType() == FbxNodeAttribute.eNurbsSurface or \
node_attribute.GetAttributeType() == FbxNodeAttribute.ePatch:
converter.TriangulateInPlace(node);
child_count = node.GetChildCount()
for i in range(child_count):
triangulate_node_hierarchy(node.GetChild(i))
def triangulate_scene(scene):
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
triangulate_node_hierarchy(node.GetChild(i))
# #####################################################
# Generate Material Object
# #####################################################
def generate_texture_bindings(material_property, material_params):
# FBX to Three.js texture types
binding_types = {
"DiffuseColor": "map",
"DiffuseFactor": "diffuseFactor",
"EmissiveColor": "emissiveMap",
"EmissiveFactor": "emissiveFactor",
"AmbientColor": "ambientMap",
"AmbientFactor": "ambientFactor",
"SpecularColor": "specularMap",
"SpecularFactor": "specularFactor",
"ShininessExponent": "shininessExponent",
"NormalMap": "normalMap",
"Bump": "bumpMap",
"TransparentColor": "transparentMap",
"TransparencyFactor": "transparentFactor",
"ReflectionColor": "reflectionMap",
"ReflectionFactor": "reflectionFactor",
"DisplacementColor": "displacementMap",
"VectorDisplacementColor": "vectorDisplacementMap"
}
if material_property.IsValid():
#Here we have to check if it's layeredtextures, or just textures:
layered_texture_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
if layered_texture_count > 0:
for j in range(layered_texture_count):
layered_texture = material_property.GetSrcObject(FbxLayeredTexture.ClassId, j)
texture_count = layered_texture.GetSrcObjectCount(FbxTexture.ClassId)
for k in range(texture_count):
texture = layered_texture.GetSrcObject(FbxTexture.ClassId,k)
if texture:
texture_id = getTextureName(texture, True)
material_params[binding_types[str(material_property.GetName())]] = texture_id
else:
# no layered texture simply get on the property
texture_count = material_property.GetSrcObjectCount(FbxTexture.ClassId)
for j in range(texture_count):
texture = material_property.GetSrcObject(FbxTexture.ClassId,j)
if texture:
texture_id = getTextureName(texture, True)
material_params[binding_types[str(material_property.GetName())]] = texture_id
def generate_material_object(material):
#Get the implementation to see if it's a hardware shader.
implementation = GetImplementation(material, "ImplementationHLSL")
implementation_type = "HLSL"
if not implementation:
implementation = GetImplementation(material, "ImplementationCGFX")
implementation_type = "CGFX"
output = None
material_params = None
material_type = None
if implementation:
print("Shader materials are not supported")
elif material.GetClassId().Is(FbxSurfaceLambert.ClassId):
ambient = getHex(material.Ambient.Get())
diffuse = getHex(material.Diffuse.Get())
emissive = getHex(material.Emissive.Get())
opacity = 1.0 - material.TransparencyFactor.Get()
opacity = 1.0 if opacity == 0 else opacity
transparent = False
reflectivity = 1
material_type = 'MeshLambertMaterial'
material_params = {
'color' : diffuse,
'ambient' : ambient,
'emissive' : emissive,
'reflectivity' : reflectivity,
'transparent' : transparent,
'opacity' : opacity
}
elif material.GetClassId().Is(FbxSurfacePhong.ClassId):
ambient = getHex(material.Ambient.Get())
diffuse = getHex(material.Diffuse.Get())
emissive = getHex(material.Emissive.Get())
specular = getHex(material.Specular.Get())
opacity = 1.0 - material.TransparencyFactor.Get()
opacity = 1.0 if opacity == 0 else opacity
shininess = material.Shininess.Get()
transparent = False
reflectivity = 1
bumpScale = 1
material_type = 'MeshPhongMaterial'
material_params = {
'color' : diffuse,
'ambient' : ambient,
'emissive' : emissive,
'specular' : specular,
'shininess' : shininess,
'bumpScale' : bumpScale,
'reflectivity' : reflectivity,
'transparent' : transparent,
'opacity' : opacity
}
else:
print "Unknown type of Material", getMaterialName(material)
    # default to Lambert Material if the current Material type cannot be handled
if not material_type:
ambient = getHex((0,0,0))
diffuse = getHex((0.5,0.5,0.5))
emissive = getHex((0,0,0))
opacity = 1
transparent = False
reflectivity = 1
material_type = 'MeshLambertMaterial'
material_params = {
'color' : diffuse,
'ambient' : ambient,
'emissive' : emissive,
'reflectivity' : reflectivity,
'transparent' : transparent,
'opacity' : opacity
}
if option_textures:
texture_count = FbxLayerElement.sTypeTextureCount()
for texture_index in range(texture_count):
material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
generate_texture_bindings(material_property, material_params)
material_params['wireframe'] = False
material_params['wireframeLinewidth'] = 1
output = {
'type' : material_type,
'parameters' : material_params
}
return output
def generate_proxy_material_object(node, material_names):
material_type = 'MeshFaceMaterial'
material_params = {
'materials' : material_names
}
output = {
'type' : material_type,
'parameters' : material_params
}
return output
# #####################################################
# Find Scene Materials
# #####################################################
def extract_materials_from_node(node, material_dict):
name = node.GetName()
mesh = node.GetNodeAttribute()
node = None
if mesh:
node = mesh.GetNode()
if node:
material_count = node.GetMaterialCount()
material_names = []
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
if materials.GetReferenceMode() == FbxLayerElement.eIndex:
#Materials are in an undefined external table
continue
for i in range(material_count):
material = node.GetMaterial(i)
material_names.append(getMaterialName(material))
if material_count > 1:
proxy_material = generate_proxy_material_object(node, material_names)
proxy_name = getMaterialName(node, True)
material_dict[proxy_name] = proxy_material
def generate_materials_from_hierarchy(node, material_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
extract_materials_from_node(node, material_dict)
for i in range(node.GetChildCount()):
generate_materials_from_hierarchy(node.GetChild(i), material_dict)
def generate_material_dict(scene):
material_dict = {}
# generate all materials for this scene
material_count = scene.GetSrcObjectCount(FbxSurfaceMaterial.ClassId)
for i in range(material_count):
material = scene.GetSrcObject(FbxSurfaceMaterial.ClassId, i)
material_object = generate_material_object(material)
material_name = getMaterialName(material)
material_dict[material_name] = material_object
    # generate material proxies
    # Three.js does not support meshes with multiple materials; however, it does
    # support materials with multiple submaterials
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_materials_from_hierarchy(node.GetChild(i), material_dict)
return material_dict
# #####################################################
# Generate Texture Object
# #####################################################
def generate_texture_object(texture):
#TODO: extract more texture properties
wrap_u = texture.GetWrapModeU()
wrap_v = texture.GetWrapModeV()
offset = texture.GetUVTranslation()
if type(texture) is FbxFileTexture:
url = texture.GetFileName()
else:
url = getTextureName( texture )
url = replace_inFolder2OutFolder( url )
output = {
'url': url,
'repeat': serializeVector2( (1,1) ),
'offset': serializeVector2( texture.GetUVTranslation() ),
'magFilter': 'LinearFilter',
'minFilter': 'LinearMipMapLinearFilter',
'anisotropy': True
}
return output
# #####################################################
# Replace Texture input path to output
# #####################################################
def replace_inFolder2OutFolder(url):
folderIndex = url.find(inputFolder)
if folderIndex != -1:
url = url[ folderIndex+len(inputFolder): ]
url = outputFolder + url
return url
# #####################################################
# Replace Texture output path to input
# #####################################################
def replace_OutFolder2inFolder(url):
folderIndex = url.find(outputFolder)
if folderIndex != -1:
url = url[ folderIndex+len(outputFolder): ]
url = inputFolder + url
return url
# #####################################################
# Find Scene Textures
# #####################################################
def extract_material_textures(material_property, texture_dict):
if material_property.IsValid():
#Here we have to check if it's layeredtextures, or just textures:
layered_texture_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
if layered_texture_count > 0:
for j in range(layered_texture_count):
layered_texture = material_property.GetSrcObject(FbxLayeredTexture.ClassId, j)
texture_count = layered_texture.GetSrcObjectCount(FbxTexture.ClassId)
for k in range(texture_count):
texture = layered_texture.GetSrcObject(FbxTexture.ClassId,k)
if texture:
texture_object = generate_texture_object(texture)
texture_name = getTextureName( texture, True )
texture_dict[texture_name] = texture_object
else:
# no layered texture simply get on the property
texture_count = material_property.GetSrcObjectCount(FbxTexture.ClassId)
for j in range(texture_count):
texture = material_property.GetSrcObject(FbxTexture.ClassId,j)
if texture:
texture_object = generate_texture_object(texture)
texture_name = getTextureName( texture, True )
texture_dict[texture_name] = texture_object
def extract_textures_from_node(node, texture_dict):
name = node.GetName()
mesh = node.GetNodeAttribute()
#for all materials attached to this mesh
material_count = mesh.GetNode().GetSrcObjectCount(FbxSurfaceMaterial.ClassId)
for material_index in range(material_count):
material = mesh.GetNode().GetSrcObject(FbxSurfaceMaterial.ClassId, material_index)
#go through all the possible textures types
if material:
texture_count = FbxLayerElement.sTypeTextureCount()
for texture_index in range(texture_count):
material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
extract_material_textures(material_property, texture_dict)
def generate_textures_from_hierarchy(node, texture_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
extract_textures_from_node(node, texture_dict)
for i in range(node.GetChildCount()):
generate_textures_from_hierarchy(node.GetChild(i), texture_dict)
def generate_texture_dict(scene):
if not option_textures:
return {}
texture_dict = {}
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_textures_from_hierarchy(node.GetChild(i), texture_dict)
return texture_dict
# #####################################################
# Extract Fbx SDK Mesh Data
# #####################################################
def extract_fbx_vertex_positions(mesh):
control_points_count = mesh.GetControlPointsCount()
control_points = mesh.GetControlPoints()
positions = []
for i in range(control_points_count):
tmp = control_points[i]
tmp = [tmp[0], tmp[1], tmp[2]]
positions.append(tmp)
node = mesh.GetNode()
if node:
t = node.GeometricTranslation.Get()
t = FbxVector4(t[0], t[1], t[2], 1)
r = node.GeometricRotation.Get()
r = FbxVector4(r[0], r[1], r[2], 1)
s = node.GeometricScaling.Get()
s = FbxVector4(s[0], s[1], s[2], 1)
hasGeometricTransform = False
if t[0] != 0 or t[1] != 0 or t[2] != 0 or \
r[0] != 0 or r[1] != 0 or r[2] != 0 or \
s[0] != 1 or s[1] != 1 or s[2] != 1:
hasGeometricTransform = True
if hasGeometricTransform:
geo_transform = FbxMatrix(t,r,s)
else:
geo_transform = FbxMatrix()
transform = None
if option_geometry:
# FbxMeshes are local to their node, we need the vertices in global space
# when scene nodes are not exported
transform = node.EvaluateGlobalTransform()
transform = FbxMatrix(transform) * geo_transform
elif hasGeometricTransform:
transform = geo_transform
if transform:
for i in range(len(positions)):
v = positions[i]
position = FbxVector4(v[0], v[1], v[2])
position = transform.MultNormalize(position)
positions[i] = [position[0], position[1], position[2]]
return positions
def extract_fbx_vertex_normals(mesh):
# eNone The mapping is undetermined.
# eByControlPoint There will be one mapping coordinate for each surface control point/vertex.
# eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
# eByPolygon There can be only one mapping coordinate for the whole polygon.
# eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
# eAllSame There can be only one mapping coordinate for the whole surface.
layered_normal_indices = []
layered_normal_values = []
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
for l in range(mesh.GetLayerCount()):
mesh_normals = mesh.GetLayer(l).GetNormals()
if not mesh_normals:
continue
normals_array = mesh_normals.GetDirectArray()
normals_count = normals_array.GetCount()
if normals_count == 0:
continue
normal_indices = []
normal_values = []
# values
for i in range(normals_count):
normal = normals_array.GetAt(i)
normal = [normal[0], normal[1], normal[2]]
normal_values.append(normal)
node = mesh.GetNode()
if node:
t = node.GeometricTranslation.Get()
t = FbxVector4(t[0], t[1], t[2], 1)
r = node.GeometricRotation.Get()
r = FbxVector4(r[0], r[1], r[2], 1)
s = node.GeometricScaling.Get()
s = FbxVector4(s[0], s[1], s[2], 1)
hasGeometricTransform = False
if t[0] != 0 or t[1] != 0 or t[2] != 0 or \
r[0] != 0 or r[1] != 0 or r[2] != 0 or \
s[0] != 1 or s[1] != 1 or s[2] != 1:
hasGeometricTransform = True
if hasGeometricTransform:
geo_transform = FbxMatrix(t,r,s)
else:
geo_transform = FbxMatrix()
transform = None
if option_geometry:
# FbxMeshes are local to their node, we need the vertices in global space
# when scene nodes are not exported
transform = node.EvaluateGlobalTransform()
transform = FbxMatrix(transform) * geo_transform
elif hasGeometricTransform:
transform = geo_transform
if transform:
t = FbxVector4(0,0,0,1)
transform.SetRow(3, t)
for i in range(len(normal_values)):
n = normal_values[i]
normal = FbxVector4(n[0], n[1], n[2])
normal = transform.MultNormalize(normal)
normal.Normalize()
normal = [normal[0], normal[1], normal[2]]
normal_values[i] = normal
# indices
vertexId = 0
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
poly_normals = []
for v in range(poly_size):
control_point_index = mesh.GetPolygonVertex(p, v)
# mapping mode is by control points. The mesh should be smooth and soft.
# we can get normals by retrieving each control point
if mesh_normals.GetMappingMode() == FbxLayerElement.eByControlPoint:
# reference mode is direct, the normal index is same as vertex index.
# get normals by the index of control vertex
if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
poly_normals.append(control_point_index)
elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_normals.GetIndexArray().GetAt(control_point_index)
poly_normals.append(index)
# mapping mode is by polygon-vertex.
# we can get normals by retrieving polygon-vertex.
elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
poly_normals.append(vertexId)
elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_normals.GetIndexArray().GetAt(vertexId)
poly_normals.append(index)
elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygon or \
mesh_normals.GetMappingMode() == FbxLayerElement.eAllSame or \
mesh_normals.GetMappingMode() == FbxLayerElement.eNone:
print("unsupported normal mapping mode for polygon vertex")
vertexId += 1
normal_indices.append(poly_normals)
layered_normal_values.append(normal_values)
layered_normal_indices.append(normal_indices)
normal_values = []
normal_indices = []
# Three.js only supports one layer of normals
if len(layered_normal_values) > 0:
normal_values = layered_normal_values[0]
normal_indices = layered_normal_indices[0]
return normal_values, normal_indices
def extract_fbx_vertex_colors(mesh):
# eNone The mapping is undetermined.
# eByControlPoint There will be one mapping coordinate for each surface control point/vertex.
# eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
# eByPolygon There can be only one mapping coordinate for the whole polygon.
# eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
# eAllSame There can be only one mapping coordinate for the whole surface.
layered_color_indices = []
layered_color_values = []
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
for l in range(mesh.GetLayerCount()):
mesh_colors = mesh.GetLayer(l).GetVertexColors()
if not mesh_colors:
continue
colors_array = mesh_colors.GetDirectArray()
colors_count = colors_array.GetCount()
if colors_count == 0:
continue
color_indices = []
color_values = []
# values
for i in range(colors_count):
color = colors_array.GetAt(i)
color = [color.mRed, color.mGreen, color.mBlue, color.mAlpha]
color_values.append(color)
# indices
vertexId = 0
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
poly_colors = []
for v in range(poly_size):
control_point_index = mesh.GetPolygonVertex(p, v)
if mesh_colors.GetMappingMode() == FbxLayerElement.eByControlPoint:
if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
poly_colors.append(control_point_index)
elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_colors.GetIndexArray().GetAt(control_point_index)
poly_colors.append(index)
elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
poly_colors.append(vertexId)
elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_colors.GetIndexArray().GetAt(vertexId)
poly_colors.append(index)
elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygon or \
mesh_colors.GetMappingMode() == FbxLayerElement.eAllSame or \
mesh_colors.GetMappingMode() == FbxLayerElement.eNone:
print("unsupported color mapping mode for polygon vertex")
vertexId += 1
color_indices.append(poly_colors)
layered_color_indices.append( color_indices )
layered_color_values.append( color_values )
color_values = []
color_indices = []
# Three.js only supports one layer of colors
if len(layered_color_values) > 0:
color_values = layered_color_values[0]
color_indices = layered_color_indices[0]
'''
# The Fbx SDK defaults mesh.Color to (0.8, 0.8, 0.8)
# This causes most models to receive incorrect vertex colors
if len(color_values) == 0:
color = mesh.Color.Get()
color_values = [[color[0], color[1], color[2]]]
color_indices = []
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
color_indices.append([0] * poly_size)
'''
return color_values, color_indices
def extract_fbx_vertex_uvs(mesh):
# eNone The mapping is undetermined.
# eByControlPoint There will be one mapping coordinate for each surface control point/vertex.
# eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
# eByPolygon There can be only one mapping coordinate for the whole polygon.
# eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
# eAllSame There can be only one mapping coordinate for the whole surface.
layered_uv_indices = []
layered_uv_values = []
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
for l in range(mesh.GetLayerCount()):
mesh_uvs = mesh.GetLayer(l).GetUVs()
if not mesh_uvs:
continue
uvs_array = mesh_uvs.GetDirectArray()
uvs_count = uvs_array.GetCount()
if uvs_count == 0:
continue
uv_indices = []
uv_values = []
# values
for i in range(uvs_count):
uv = uvs_array.GetAt(i)
uv = [uv[0], uv[1]]
uv_values.append(uv)
# indices
vertexId = 0
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
poly_uvs = []
for v in range(poly_size):
control_point_index = mesh.GetPolygonVertex(p, v)
if mesh_uvs.GetMappingMode() == FbxLayerElement.eByControlPoint:
if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect:
poly_uvs.append(control_point_index)
elif mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_uvs.GetIndexArray().GetAt(control_point_index)
poly_uvs.append(index)
elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
uv_texture_index = mesh_uvs.GetIndexArray().GetAt(vertexId)
if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect or \
mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
poly_uvs.append(uv_texture_index)
elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygon or \
mesh_uvs.GetMappingMode() == FbxLayerElement.eAllSame or \
mesh_uvs.GetMappingMode() == FbxLayerElement.eNone:
print("unsupported uv mapping mode for polygon vertex")
vertexId += 1
uv_indices.append(poly_uvs)
layered_uv_values.append(uv_values)
layered_uv_indices.append(uv_indices)
return layered_uv_values, layered_uv_indices
# #####################################################
# Process Mesh Geometry
# #####################################################
def generate_normal_key(normal):
return (round(normal[0], 6), round(normal[1], 6), round(normal[2], 6))
def generate_color_key(color):
return getHex(color)
def generate_uv_key(uv):
return (round(uv[0], 6), round(uv[1], 6))
def append_non_duplicate_uvs(source_uvs, dest_uvs, counts):
source_layer_count = len(source_uvs)
for layer_index in range(source_layer_count):
dest_layer_count = len(dest_uvs)
if dest_layer_count <= layer_index:
dest_uv_layer = {}
count = 0
dest_uvs.append(dest_uv_layer)
counts.append(count)
else:
dest_uv_layer = dest_uvs[layer_index]
count = counts[layer_index]
source_uv_layer = source_uvs[layer_index]
for uv in source_uv_layer:
key = generate_uv_key(uv)
if key not in dest_uv_layer:
dest_uv_layer[key] = count
count += 1
counts[layer_index] = count
return counts
def generate_unique_normals_dictionary(mesh_list):
normals_dictionary = {}
nnormals = 0
# Merge meshes, remove duplicate data
for mesh in mesh_list:
node = mesh.GetNode()
normal_values, normal_indices = extract_fbx_vertex_normals(mesh)
if len(normal_values) > 0:
for normal in normal_values:
key = generate_normal_key(normal)
if key not in normals_dictionary:
normals_dictionary[key] = nnormals
nnormals += 1
return normals_dictionary
def generate_unique_colors_dictionary(mesh_list):
colors_dictionary = {}
ncolors = 0
# Merge meshes, remove duplicate data
for mesh in mesh_list:
color_values, color_indices = extract_fbx_vertex_colors(mesh)
if len(color_values) > 0:
for color in color_values:
key = generate_color_key(color)
if key not in colors_dictionary:
colors_dictionary[key] = ncolors
ncolors += 1
return colors_dictionary
def generate_unique_uvs_dictionary_layers(mesh_list):
uvs_dictionary_layers = []
nuvs_list = []
# Merge meshes, remove duplicate data
for mesh in mesh_list:
uv_values, uv_indices = extract_fbx_vertex_uvs(mesh)
if len(uv_values) > 0:
nuvs_list = append_non_duplicate_uvs(uv_values, uvs_dictionary_layers, nuvs_list)
return uvs_dictionary_layers
def generate_normals_from_dictionary(normals_dictionary):
normal_values = []
for key, index in sorted(normals_dictionary.items(), key = operator.itemgetter(1)):
normal_values.append(key)
return normal_values
def generate_colors_from_dictionary(colors_dictionary):
color_values = []
for key, index in sorted(colors_dictionary.items(), key = operator.itemgetter(1)):
color_values.append(key)
return color_values
def generate_uvs_from_dictionary_layers(uvs_dictionary_layers):
uv_values = []
for uvs_dictionary in uvs_dictionary_layers:
uv_values_layer = []
for key, index in sorted(uvs_dictionary.items(), key = operator.itemgetter(1)):
uv_values_layer.append(key)
uv_values.append(uv_values_layer)
return uv_values
def generate_normal_indices_for_poly(poly_index, mesh_normal_values, mesh_normal_indices, normals_to_indices):
if len(mesh_normal_indices) <= 0:
return []
poly_normal_indices = mesh_normal_indices[poly_index]
poly_size = len(poly_normal_indices)
output_poly_normal_indices = []
for v in range(poly_size):
normal_index = poly_normal_indices[v]
normal_value = mesh_normal_values[normal_index]
key = generate_normal_key(normal_value)
output_index = normals_to_indices[key]
output_poly_normal_indices.append(output_index)
return output_poly_normal_indices
def generate_color_indices_for_poly(poly_index, mesh_color_values, mesh_color_indices, colors_to_indices):
if len(mesh_color_indices) <= 0:
return []
poly_color_indices = mesh_color_indices[poly_index]
poly_size = len(poly_color_indices)
output_poly_color_indices = []
for v in range(poly_size):
color_index = poly_color_indices[v]
color_value = mesh_color_values[color_index]
key = generate_color_key(color_value)
output_index = colors_to_indices[key]
output_poly_color_indices.append(output_index)
return output_poly_color_indices
def generate_uv_indices_for_poly(poly_index, mesh_uv_values, mesh_uv_indices, uvs_to_indices):
if len(mesh_uv_indices) <= 0:
return []
poly_uv_indices = mesh_uv_indices[poly_index]
poly_size = len(poly_uv_indices)
output_poly_uv_indices = []
for v in range(poly_size):
uv_index = poly_uv_indices[v]
uv_value = mesh_uv_values[uv_index]
key = generate_uv_key(uv_value)
output_index = uvs_to_indices[key]
output_poly_uv_indices.append(output_index)
return output_poly_uv_indices
def process_mesh_vertices(mesh_list):
vertex_offset = 0
vertex_offset_list = [0]
vertices = []
for mesh in mesh_list:
node = mesh.GetNode()
mesh_vertices = extract_fbx_vertex_positions(mesh)
vertices.extend(mesh_vertices[:])
vertex_offset += len(mesh_vertices)
vertex_offset_list.append(vertex_offset)
return vertices, vertex_offset_list
def process_mesh_materials(mesh_list):
material_offset = 0
material_offset_list = [0]
materials_list = []
#TODO: remove duplicate mesh references
for mesh in mesh_list:
node = mesh.GetNode()
material_count = node.GetMaterialCount()
if material_count > 0:
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
if materials.GetReferenceMode() == FbxLayerElement.eIndex:
#Materials are in an undefined external table
continue
for i in range(material_count):
material = node.GetMaterial(i)
materials_list.append( material )
material_offset += material_count
material_offset_list.append(material_offset)
return materials_list, material_offset_list
def process_mesh_polygons(mesh_list, normals_to_indices, colors_to_indices, uvs_to_indices_list, vertex_offset_list, material_offset_list):
faces = []
for mesh_index in range(len(mesh_list)):
mesh = mesh_list[mesh_index]
flipWindingOrder = False
node = mesh.GetNode()
if node:
local_scale = node.EvaluateLocalScaling()
if local_scale[0] < 0 or local_scale[1] < 0 or local_scale[2] < 0:
flipWindingOrder = True
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
normal_values, normal_indices = extract_fbx_vertex_normals(mesh)
color_values, color_indices = extract_fbx_vertex_colors(mesh)
uv_values_layers, uv_indices_layers = extract_fbx_vertex_uvs(mesh)
for poly_index in range(poly_count):
poly_size = mesh.GetPolygonSize(poly_index)
face_normals = generate_normal_indices_for_poly(poly_index, normal_values, normal_indices, normals_to_indices)
face_colors = generate_color_indices_for_poly(poly_index, color_values, color_indices, colors_to_indices)
face_uv_layers = []
for l in range(len(uv_indices_layers)):
uv_values = uv_values_layers[l]
uv_indices = uv_indices_layers[l]
face_uv_indices = generate_uv_indices_for_poly(poly_index, uv_values, uv_indices, uvs_to_indices_list[l])
face_uv_layers.append(face_uv_indices)
face_vertices = []
for vertex_index in range(poly_size):
control_point_index = mesh.GetPolygonVertex(poly_index, vertex_index)
face_vertices.append(control_point_index)
#TODO: assign a default material to any mesh without one
if len(material_offset_list) <= mesh_index:
material_offset = 0
else:
material_offset = material_offset_list[mesh_index]
vertex_offset = vertex_offset_list[mesh_index]
if poly_size > 4:
new_face_normals = []
new_face_colors = []
new_face_uv_layers = []
for i in range(poly_size - 2):
new_face_vertices = [face_vertices[0], face_vertices[i+1], face_vertices[i+2]]
if len(face_normals):
new_face_normals = [face_normals[0], face_normals[i+1], face_normals[i+2]]
if len(face_colors):
new_face_colors = [face_colors[0], face_colors[i+1], face_colors[i+2]]
if len(face_uv_layers):
new_face_uv_layers = []
for layer in face_uv_layers:
new_face_uv_layers.append([layer[0], layer[i+1], layer[i+2]])
face = generate_mesh_face(mesh,
poly_index,
new_face_vertices,
new_face_normals,
new_face_colors,
new_face_uv_layers,
vertex_offset,
material_offset,
flipWindingOrder)
faces.append(face)
else:
face = generate_mesh_face(mesh,
poly_index,
face_vertices,
face_normals,
face_colors,
face_uv_layers,
vertex_offset,
material_offset,
flipWindingOrder)
faces.append(face)
return faces
def generate_mesh_face(mesh, polygon_index, vertex_indices, normals, colors, uv_layers, vertex_offset, material_offset, flipOrder):
isTriangle = ( len(vertex_indices) == 3 )
nVertices = 3 if isTriangle else 4
hasMaterial = False
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
hasMaterial = True
break
hasFaceUvs = False
hasFaceVertexUvs = len(uv_layers) > 0
hasFaceNormals = False
hasFaceVertexNormals = len(normals) > 0
hasFaceColors = False
hasFaceVertexColors = len(colors) > 0
faceType = 0
faceType = setBit(faceType, 0, not isTriangle)
faceType = setBit(faceType, 1, hasMaterial)
faceType = setBit(faceType, 2, hasFaceUvs)
faceType = setBit(faceType, 3, hasFaceVertexUvs)
faceType = setBit(faceType, 4, hasFaceNormals)
faceType = setBit(faceType, 5, hasFaceVertexNormals)
faceType = setBit(faceType, 6, hasFaceColors)
faceType = setBit(faceType, 7, hasFaceVertexColors)
faceData = []
# order is important, must match order in JSONLoader
# face type
# vertex indices
# material index
# face uvs index
# face vertex uvs indices
# face color index
# face vertex colors indices
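    # Illustrative example (not from the original script): a triangle with a
    # material, per-vertex UVs and per-vertex normals sets bits 1, 3 and 5
    # (faceType = 0b00101010 = 42), so the packed data would read
    # [42, v0, v1, v2, materialIndex, uv0, uv1, uv2, n0, n1, n2].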
faceData.append(faceType)
if flipOrder:
if nVertices == 3:
vertex_indices = [vertex_indices[0], vertex_indices[2], vertex_indices[1]]
if hasFaceVertexNormals:
normals = [normals[0], normals[2], normals[1]]
if hasFaceVertexColors:
colors = [colors[0], colors[2], colors[1]]
if hasFaceVertexUvs:
tmp = []
for polygon_uvs in uv_layers:
tmp.append([polygon_uvs[0], polygon_uvs[2], polygon_uvs[1]])
uv_layers = tmp
else:
vertex_indices = [vertex_indices[0], vertex_indices[3], vertex_indices[2], vertex_indices[1]]
if hasFaceVertexNormals:
normals = [normals[0], normals[3], normals[2], normals[1]]
if hasFaceVertexColors:
colors = [colors[0], colors[3], colors[2], colors[1]]
if hasFaceVertexUvs:
tmp = []
for polygon_uvs in uv_layers:
                    tmp.append([polygon_uvs[0], polygon_uvs[3], polygon_uvs[2], polygon_uvs[1]])
uv_layers = tmp
for i in range(nVertices):
index = vertex_indices[i] + vertex_offset
faceData.append(index)
if hasMaterial:
material_id = 0
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
material_id = materials.GetIndexArray().GetAt(polygon_index)
break
material_id += material_offset
faceData.append( material_id )
if hasFaceVertexUvs:
for polygon_uvs in uv_layers:
for i in range(nVertices):
index = polygon_uvs[i]
faceData.append(index)
if hasFaceVertexNormals:
for i in range(nVertices):
index = normals[i]
faceData.append(index)
if hasFaceVertexColors:
for i in range(nVertices):
index = colors[i]
faceData.append(index)
return faceData
# #####################################################
# Generate Mesh Object (for scene output format)
# #####################################################
def generate_scene_output(node):
mesh = node.GetNodeAttribute()
# This is done in order to keep the scene output and non-scene output code DRY
mesh_list = [ mesh ]
# Extract the mesh data into arrays
vertices, vertex_offsets = process_mesh_vertices(mesh_list)
materials, material_offsets = process_mesh_materials(mesh_list)
normals_to_indices = generate_unique_normals_dictionary(mesh_list)
colors_to_indices = generate_unique_colors_dictionary(mesh_list)
uvs_to_indices_list = generate_unique_uvs_dictionary_layers(mesh_list)
normal_values = generate_normals_from_dictionary(normals_to_indices)
color_values = generate_colors_from_dictionary(colors_to_indices)
uv_values = generate_uvs_from_dictionary_layers(uvs_to_indices_list)
# Generate mesh faces for the Three.js file format
faces = process_mesh_polygons(mesh_list,
normals_to_indices,
colors_to_indices,
uvs_to_indices_list,
vertex_offsets,
material_offsets)
# Generate counts for uvs, vertices, normals, colors, and faces
nuvs = []
for layer_index, uvs in enumerate(uv_values):
nuvs.append(str(len(uvs)))
nvertices = len(vertices)
nnormals = len(normal_values)
ncolors = len(color_values)
nfaces = len(faces)
# Flatten the arrays, currently they are in the form of [[0, 1, 2], [3, 4, 5], ...]
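    # e.g. [[x0, y0, z0], [x1, y1, z1]] becomes [x0, y0, z0, x1, y1, z1]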
vertices = [val for v in vertices for val in v]
normal_values = [val for n in normal_values for val in n]
color_values = [c for c in color_values]
faces = [val for f in faces for val in f]
uv_values = generate_uvs(uv_values)
# Disable automatic json indenting when pretty printing for the arrays
if option_pretty_print:
nuvs = NoIndent(nuvs)
vertices = ChunkedIndent(vertices, 15, True)
normal_values = ChunkedIndent(normal_values, 15, True)
color_values = ChunkedIndent(color_values, 15)
faces = ChunkedIndent(faces, 30)
metadata = {
'vertices' : nvertices,
'normals' : nnormals,
'colors' : ncolors,
'faces' : nfaces,
'uvs' : nuvs
}
output = {
'scale' : 1,
'materials' : [],
'vertices' : vertices,
'normals' : [] if nnormals <= 0 else normal_values,
'colors' : [] if ncolors <= 0 else color_values,
'uvs' : uv_values,
'faces' : faces
}
if option_pretty_print:
output['0metadata'] = metadata
else:
output['metadata'] = metadata
return output
# #####################################################
# Generate Mesh Object (for non-scene output)
# #####################################################
def generate_non_scene_output(scene):
mesh_list = generate_mesh_list(scene)
# Extract the mesh data into arrays
vertices, vertex_offsets = process_mesh_vertices(mesh_list)
materials, material_offsets = process_mesh_materials(mesh_list)
normals_to_indices = generate_unique_normals_dictionary(mesh_list)
colors_to_indices = generate_unique_colors_dictionary(mesh_list)
uvs_to_indices_list = generate_unique_uvs_dictionary_layers(mesh_list)
normal_values = generate_normals_from_dictionary(normals_to_indices)
color_values = generate_colors_from_dictionary(colors_to_indices)
uv_values = generate_uvs_from_dictionary_layers(uvs_to_indices_list)
# Generate mesh faces for the Three.js file format
faces = process_mesh_polygons(mesh_list,
normals_to_indices,
colors_to_indices,
uvs_to_indices_list,
vertex_offsets,
material_offsets)
# Generate counts for uvs, vertices, normals, colors, and faces
nuvs = []
for layer_index, uvs in enumerate(uv_values):
nuvs.append(str(len(uvs)))
nvertices = len(vertices)
nnormals = len(normal_values)
ncolors = len(color_values)
nfaces = len(faces)
# Flatten the arrays, currently they are in the form of [[0, 1, 2], [3, 4, 5], ...]
vertices = [val for v in vertices for val in v]
normal_values = [val for n in normal_values for val in n]
color_values = [c for c in color_values]
faces = [val for f in faces for val in f]
uv_values = generate_uvs(uv_values)
# Disable json indenting when pretty printing for the arrays
if option_pretty_print:
nuvs = NoIndent(nuvs)
vertices = NoIndent(vertices)
normal_values = NoIndent(normal_values)
color_values = NoIndent(color_values)
faces = NoIndent(faces)
metadata = {
'formatVersion' : 3,
'type' : 'geometry',
'generatedBy' : 'convert-to-threejs.py',
'vertices' : nvertices,
'normals' : nnormals,
'colors' : ncolors,
'faces' : nfaces,
'uvs' : nuvs
}
output = {
'scale' : 1,
'materials' : [],
'vertices' : vertices,
'normals' : [] if nnormals <= 0 else normal_values,
'colors' : [] if ncolors <= 0 else color_values,
'uvs' : uv_values,
'faces' : faces
}
if option_pretty_print:
output['0metadata'] = metadata
else:
output['metadata'] = metadata
return output
def generate_mesh_list_from_hierarchy(node, mesh_list):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh or \
attribute_type == FbxNodeAttribute.eNurbs or \
attribute_type == FbxNodeAttribute.eNurbsSurface or \
attribute_type == FbxNodeAttribute.ePatch:
if attribute_type != FbxNodeAttribute.eMesh:
converter.TriangulateInPlace(node);
mesh_list.append(node.GetNodeAttribute())
for i in range(node.GetChildCount()):
generate_mesh_list_from_hierarchy(node.GetChild(i), mesh_list)
def generate_mesh_list(scene):
mesh_list = []
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_mesh_list_from_hierarchy(node.GetChild(i), mesh_list)
return mesh_list
# #####################################################
# Generate Embed Objects
# #####################################################
def generate_embed_dict_from_hierarchy(node, embed_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh or \
attribute_type == FbxNodeAttribute.eNurbs or \
attribute_type == FbxNodeAttribute.eNurbsSurface or \
attribute_type == FbxNodeAttribute.ePatch:
if attribute_type != FbxNodeAttribute.eMesh:
converter.TriangulateInPlace(node);
embed_object = generate_scene_output(node)
embed_name = getPrefixedName(node, 'Embed')
embed_dict[embed_name] = embed_object
for i in range(node.GetChildCount()):
generate_embed_dict_from_hierarchy(node.GetChild(i), embed_dict)
def generate_embed_dict(scene):
embed_dict = {}
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_embed_dict_from_hierarchy(node.GetChild(i), embed_dict)
return embed_dict
# #####################################################
# Generate Geometry Objects
# #####################################################
def generate_geometry_object(node):
output = {
'type' : 'embedded',
'id' : getPrefixedName( node, 'Embed' )
}
return output
def generate_geometry_dict_from_hierarchy(node, geometry_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
geometry_object = generate_geometry_object(node)
geometry_name = getPrefixedName( node, 'Geometry' )
geometry_dict[geometry_name] = geometry_object
for i in range(node.GetChildCount()):
generate_geometry_dict_from_hierarchy(node.GetChild(i), geometry_dict)
def generate_geometry_dict(scene):
geometry_dict = {}
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_geometry_dict_from_hierarchy(node.GetChild(i), geometry_dict)
return geometry_dict
# #####################################################
# Generate Light Node Objects
# #####################################################
def generate_default_light():
direction = (1,1,1)
color = (1,1,1)
intensity = 80.0
output = {
'type': 'DirectionalLight',
'color': getHex(color),
'intensity': intensity/100.00,
'direction': serializeVector3( direction ),
'target': getObjectName( None )
}
return output
def generate_light_object(node):
light = node.GetNodeAttribute()
light_types = ["point", "directional", "spot", "area", "volume"]
light_type = light_types[light.LightType.Get()]
transform = node.EvaluateLocalTransform()
position = transform.GetT()
output = None
if light_type == "directional":
# Three.js directional lights emit light from a point in 3d space to a target node or the origin.
# When there is no target, we need to take a point, one unit away from the origin, and move it
# into the right location so that the origin acts like the target
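        # For example (illustrative): with no target node and an identity rotation,
        # the unit point (0, 1, 0) is used directly, i.e. the light shines from +Y
        # toward the origin.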
if node.GetTarget():
direction = position
else:
translation = FbxVector4(0,0,0,0)
scale = FbxVector4(1,1,1,1)
rotation = transform.GetR()
matrix = FbxMatrix(translation, rotation, scale)
direction = matrix.MultNormalize(FbxVector4(0,1,0,1))
output = {
'type': 'DirectionalLight',
'color': getHex(light.Color.Get()),
'intensity': light.Intensity.Get()/100.0,
'direction': serializeVector3( direction ),
'target': getObjectName( node.GetTarget() )
}
elif light_type == "point":
output = {
'type': 'PointLight',
'color': getHex(light.Color.Get()),
'intensity': light.Intensity.Get()/100.0,
'position': serializeVector3( position ),
'distance': light.FarAttenuationEnd.Get()
}
elif light_type == "spot":
output = {
'type': 'SpotLight',
'color': getHex(light.Color.Get()),
'intensity': light.Intensity.Get()/100.0,
'position': serializeVector3( position ),
'distance': light.FarAttenuationEnd.Get(),
'angle': light.OuterAngle.Get()*math.pi/180,
'exponent': light.DecayType.Get(),
'target': getObjectName( node.GetTarget() )
}
return output
def generate_ambient_light(scene):
scene_settings = scene.GetGlobalSettings()
ambient_color = scene_settings.GetAmbientColor()
ambient_color = (ambient_color.mRed, ambient_color.mGreen, ambient_color.mBlue)
if ambient_color[0] == 0 and ambient_color[1] == 0 and ambient_color[2] == 0:
return None
output = {
'type': 'AmbientLight',
'color': getHex(ambient_color)
}
return output
# #####################################################
# Generate Camera Node Objects
# #####################################################
def generate_default_camera():
position = (100, 100, 100)
near = 0.1
far = 1000
fov = 75
output = {
'type': 'PerspectiveCamera',
'fov': fov,
'near': near,
'far': far,
'position': serializeVector3( position )
}
return output
def generate_camera_object(node):
camera = node.GetNodeAttribute()
position = camera.Position.Get()
projection_types = [ "perspective", "orthogonal" ]
projection = projection_types[camera.ProjectionType.Get()]
near = camera.NearPlane.Get()
far = camera.FarPlane.Get()
name = getObjectName( node )
output = {}
if projection == "perspective":
aspect = camera.PixelAspectRatio.Get()
fov = camera.FieldOfView.Get()
output = {
'type': 'PerspectiveCamera',
'fov': fov,
'aspect': aspect,
'near': near,
'far': far,
'position': serializeVector3( position )
}
elif projection == "orthogonal":
left = ""
right = ""
top = ""
bottom = ""
output = {
            'type': 'OrthographicCamera',
'left': left,
'right': right,
'top': top,
'bottom': bottom,
'near': near,
'far': far,
'position': serializeVector3( position )
}
return output
# #####################################################
# Generate Camera Names
# #####################################################
def generate_camera_name_list_from_hierarchy(node, camera_list):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eCamera:
camera_string = getObjectName(node)
camera_list.append(camera_string)
for i in range(node.GetChildCount()):
generate_camera_name_list_from_hierarchy(node.GetChild(i), camera_list)
def generate_camera_name_list(scene):
camera_list = []
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_camera_name_list_from_hierarchy(node.GetChild(i), camera_list)
return camera_list
# #####################################################
# Generate Mesh Node Object
# #####################################################
def generate_mesh_object(node):
mesh = node.GetNodeAttribute()
transform = node.EvaluateLocalTransform()
position = transform.GetT()
scale = transform.GetS()
rotation = getRadians(transform.GetR())
quaternion = transform.GetQ()
material_count = node.GetMaterialCount()
material_name = ""
if material_count > 0:
material_names = []
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
if materials.GetReferenceMode() == FbxLayerElement.eIndex:
#Materials are in an undefined external table
continue
for i in range(material_count):
material = node.GetMaterial(i)
material_names.append( getMaterialName(material) )
if not material_count > 1 and not len(material_names) > 0:
material_names.append('')
#If this mesh has more than one material, use a proxy material
material_name = getMaterialName( node, True) if material_count > 1 else material_names[0]
output = {
'geometry': getPrefixedName( node, 'Geometry' ),
'material': material_name,
'position': serializeVector3( position ),
'quaternion': serializeVector4( quaternion ),
'scale': serializeVector3( scale ),
'visible': True,
}
return output
# #####################################################
# Generate Node Object
# #####################################################
def generate_object(node):
node_types = ["Unknown", "Null", "Marker", "Skeleton", "Mesh", "Nurbs", "Patch", "Camera",
"CameraStereo", "CameraSwitcher", "Light", "OpticalReference", "OpticalMarker", "NurbsCurve",
"TrimNurbsSurface", "Boundary", "NurbsSurface", "Shape", "LODGroup", "SubDiv", "CachedEffect", "Line"]
transform = node.EvaluateLocalTransform()
position = transform.GetT()
scale = transform.GetS()
rotation = getRadians(transform.GetR())
quaternion = transform.GetQ()
node_type = ""
if node.GetNodeAttribute() == None:
node_type = "Null"
else:
node_type = node_types[node.GetNodeAttribute().GetAttributeType()]
name = getObjectName( node )
output = {
'fbx_type': node_type,
'position': serializeVector3( position ),
'quaternion': serializeVector4( quaternion ),
'scale': serializeVector3( scale ),
'visible': True
}
return output
# #####################################################
# Parse Scene Node Objects
# #####################################################
def generate_object_hierarchy(node, object_dict):
object_count = 0
if node.GetNodeAttribute() == None:
object_data = generate_object(node)
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
object_data = generate_mesh_object(node)
elif attribute_type == FbxNodeAttribute.eLight:
object_data = generate_light_object(node)
elif attribute_type == FbxNodeAttribute.eCamera:
object_data = generate_camera_object(node)
else:
object_data = generate_object(node)
object_count += 1
object_name = getObjectName(node)
object_children = {}
for i in range(node.GetChildCount()):
object_count += generate_object_hierarchy(node.GetChild(i), object_children)
if node.GetChildCount() > 0:
# Having 'children' above other attributes is hard to read.
# We can send it to the bottom using the last letter of the alphabet 'z'.
# This letter is removed from the final output.
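            # Illustrative note (assumption about the pretty-print pass): a key such
            # as 'zchildren' sorts after 'position'/'scale'/'visible', and the later
            # executeRegexHacks step presumably rewrites it back to 'children' in the
            # emitted JSON.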
if option_pretty_print:
object_data['zchildren'] = object_children
else:
object_data['children'] = object_children
object_dict[object_name] = object_data
return object_count
def generate_scene_objects(scene):
object_count = 0
object_dict = {}
ambient_light = generate_ambient_light(scene)
if ambient_light:
object_dict['AmbientLight'] = ambient_light
object_count += 1
if option_default_light:
default_light = generate_default_light()
object_dict['DefaultLight'] = default_light
object_count += 1
if option_default_camera:
default_camera = generate_default_camera()
object_dict['DefaultCamera'] = default_camera
object_count += 1
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
object_count += generate_object_hierarchy(node.GetChild(i), object_dict)
return object_dict, object_count
# #####################################################
# Generate Scene Output
# #####################################################
def extract_scene(scene, filename):
global_settings = scene.GetGlobalSettings()
objects, nobjects = generate_scene_objects(scene)
textures = generate_texture_dict(scene)
materials = generate_material_dict(scene)
geometries = generate_geometry_dict(scene)
embeds = generate_embed_dict(scene)
ntextures = len(textures)
nmaterials = len(materials)
ngeometries = len(geometries)
position = serializeVector3( (0,0,0) )
rotation = serializeVector3( (0,0,0) )
scale = serializeVector3( (1,1,1) )
camera_names = generate_camera_name_list(scene)
scene_settings = scene.GetGlobalSettings()
# This does not seem to be any help here
# global_settings.GetDefaultCamera()
defcamera = camera_names[0] if len(camera_names) > 0 else ""
if option_default_camera:
defcamera = 'default_camera'
metadata = {
'formatVersion': 3.2,
'type': 'scene',
'generatedBy': 'convert-to-threejs.py',
'objects': nobjects,
'geometries': ngeometries,
'materials': nmaterials,
'textures': ntextures
}
transform = {
'position' : position,
'rotation' : rotation,
'scale' : scale
}
defaults = {
'bgcolor' : 0,
'camera' : defcamera,
'fog' : ''
}
output = {
'objects': objects,
'geometries': geometries,
'materials': materials,
'textures': textures,
'embeds': embeds,
'transform': transform,
'defaults': defaults,
}
if option_pretty_print:
output['0metadata'] = metadata
else:
output['metadata'] = metadata
return output
# #####################################################
# Generate Non-Scene Output
# #####################################################
def extract_geometry(scene, filename):
output = generate_non_scene_output(scene)
return output
# #####################################################
# File Helpers
# #####################################################
def write_file(filepath, content):
index = filepath.rfind('/')
dir = filepath[0:index]
if not os.path.exists(dir):
os.makedirs(dir)
out = open(filepath, "w")
out.write(content.encode('utf8', 'replace'))
out.close()
def read_file(filepath):
f = open(filepath)
content = f.readlines()
f.close()
return content
def copy_textures(textures):
texture_dict = {}
for key in textures:
url = textures[key]['url']
src = replace_OutFolder2inFolder(url)
if url in texture_dict: # texture has been copied
continue
if not os.path.exists(src):
print("copy_texture error: we can't find this texture at " + src)
continue
try:
index = url.rfind('/')
folder = url[0:index]
if len(folder) and not os.path.exists(folder):
os.makedirs(folder)
shutil.copyfile(src, url)
texture_dict[url] = True
except IOError as e:
print "I/O error({0}): {1} {2}".format(e.errno, e.strerror, src)
def findFilesWithExt(directory, ext, include_path = True):
ext = ext.lower()
found = []
for root, dirs, files in os.walk(directory):
for filename in files:
current_ext = os.path.splitext(filename)[1].lower()
if current_ext == ext:
if include_path:
found.append(os.path.join(root, filename))
else:
found.append(filename)
return found
# #####################################################
# main
# #####################################################
if __name__ == "__main__":
from optparse import OptionParser
try:
from FbxCommon import *
except ImportError:
import platform
msg = 'Could not locate the python FBX SDK!\n'
msg += 'You need to copy the FBX SDK into your python install folder such as '
if platform.system() == 'Windows' or platform.system() == 'Microsoft':
msg += '"Python26/Lib/site-packages"'
elif platform.system() == 'Linux':
msg += '"/usr/local/lib/python2.6/site-packages"'
elif platform.system() == 'Darwin':
msg += '"/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages"'
msg += ' folder.'
print(msg)
sys.exit(1)
usage = "Usage: %prog [source_file.fbx] [output_file.js] [options]"
parser = OptionParser(usage=usage)
parser.add_option('-t', '--triangulate', action='store_true', dest='triangulate', help="force quad geometry into triangles", default=False)
parser.add_option('-x', '--ignore-textures', action='store_true', dest='notextures', help="don't include texture references in output file", default=False)
parser.add_option('-u', '--force-prefix', action='store_true', dest='prefix', help="prefix all object names in output file to ensure uniqueness", default=False)
parser.add_option('-f', '--flatten-scene', action='store_true', dest='geometry', help="merge all geometries and apply node transforms", default=False)
parser.add_option('-c', '--add-camera', action='store_true', dest='defcamera', help="include default camera in output scene", default=False)
parser.add_option('-l', '--add-light', action='store_true', dest='deflight', help="include default light in output scene", default=False)
    parser.add_option('-p', '--pretty-print', action='store_true', dest='pretty', help="pretty print the output JSON for readability", default=False)
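    # Illustrative invocation (hypothetical file names, flags as defined above):
    #   python convert_to_threejs.py model.fbx model.js --triangulate --pretty-print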
(options, args) = parser.parse_args()
option_triangulate = options.triangulate
    option_textures = not options.notextures
option_prefix = options.prefix
option_geometry = options.geometry
option_default_camera = options.defcamera
option_default_light = options.deflight
option_pretty_print = options.pretty
# Prepare the FBX SDK.
sdk_manager, scene = InitializeSdkObjects()
converter = FbxGeometryConverter(sdk_manager)
# The converter takes an FBX file as an argument.
if len(args) > 1:
print("\nLoading file: %s" % args[0])
result = LoadScene(sdk_manager, scene, args[0])
else:
result = False
print("\nUsage: convert_fbx_to_threejs [source_file.fbx] [output_file.js]\n")
if not result:
print("\nAn error occurred while loading the file...")
else:
if option_triangulate:
print("\nForcing geometry to triangles")
triangulate_scene(scene)
        # Convert the scene to match the asset's up axis (Maya Y-up vs Z-up)
upVector = scene.GetGlobalSettings().GetAxisSystem().GetUpVector();
axis_system = FbxAxisSystem.MayaYUp
if upVector[0] == 3:
axis_system = FbxAxisSystem.MayaZUp
axis_system.ConvertScene(scene)
inputFolder = args[0].replace( "\\", "/" );
index = args[0].rfind( "/" );
inputFolder = inputFolder[:index]
outputFolder = args[1].replace( "\\", "/" );
index = args[1].rfind( "/" );
outputFolder = outputFolder[:index]
if option_geometry:
output_content = extract_geometry(scene, os.path.basename(args[0]))
else:
output_content = extract_scene(scene, os.path.basename(args[0]))
if option_pretty_print:
output_string = json.dumps(output_content, indent=4, cls=CustomEncoder, separators=(',', ': '), sort_keys=True)
output_string = executeRegexHacks(output_string)
else:
output_string = json.dumps(output_content, separators=(',', ': '), sort_keys=True)
output_path = os.path.join(os.getcwd(), args[1])
write_file(output_path, output_string)
copy_textures( output_content['textures'] )
print("\nExported Three.js file to:\n%s\n" % output_path)
# Destroy all objects created by the FBX SDK.
sdk_manager.Destroy()
sys.exit(0)
| mit |
fidodaj/project2 | server/lib/werkzeug/debug/tbtools.py | 311 | 16785 | # -*- coding: utf-8 -*-
"""
werkzeug.debug.tbtools
~~~~~~~~~~~~~~~~~~~~~~
This module provides various traceback related utility functions.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import re
import os
import sys
import json
import inspect
import traceback
import codecs
from tokenize import TokenError
from werkzeug.utils import cached_property, escape
from werkzeug.debug.console import Console
from werkzeug._compat import range_type, PY2, text_type, string_types
_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
_line_re = re.compile(r'^(.*?)$(?m)')
_funcdef_re = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
UTF8_COOKIE = '\xef\xbb\xbf'
system_exceptions = (SystemExit, KeyboardInterrupt)
try:
system_exceptions += (GeneratorExit,)
except NameError:
pass
HEADER = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>%(title)s // Werkzeug Debugger</title>
<link rel="stylesheet" href="?__debugger__=yes&cmd=resource&f=style.css" type="text/css">
<!-- We need to make sure this has a favicon so that the debugger does not by
accident trigger a request to /favicon.ico which might change the application
state. -->
<link rel="shortcut icon" href="?__debugger__=yes&cmd=resource&f=console.png">
<script type="text/javascript" src="?__debugger__=yes&cmd=resource&f=jquery.js"></script>
<script type="text/javascript" src="?__debugger__=yes&cmd=resource&f=debugger.js"></script>
<script type="text/javascript">
var TRACEBACK = %(traceback_id)d,
CONSOLE_MODE = %(console)s,
EVALEX = %(evalex)s,
SECRET = "%(secret)s";
</script>
</head>
<body>
<div class="debugger">
'''
FOOTER = u'''\
<div class="footer">
Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
friendly Werkzeug powered traceback interpreter.
</div>
</div>
</body>
</html>
'''
PAGE_HTML = HEADER + u'''\
<h1>%(exception_type)s</h1>
<div class="detail">
<p class="errormsg">%(exception)s</p>
</div>
<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
%(summary)s
<div class="plain">
<form action="/?__debugger__=yes&cmd=paste" method="post">
<p>
<input type="hidden" name="language" value="pytb">
This is the Copy/Paste friendly version of the traceback. <span
class="pastemessage">You can also paste this traceback into
a <a href="https://gist.github.com/">gist</a>:
<input type="submit" value="create paste"></span>
</p>
<textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
</form>
</div>
<div class="explanation">
The debugger caught an exception in your WSGI application. You can now
look at the traceback which led to the error. <span class="nojavascript">
If you enable JavaScript you can also use additional features such as code
execution (if the evalex feature is enabled), automatic pasting of the
exceptions and much more.</span>
</div>
''' + FOOTER + '''
<!--
%(plaintext_cs)s
-->
'''
CONSOLE_HTML = HEADER + u'''\
<h1>Interactive Console</h1>
<div class="explanation">
In this console you can execute Python expressions in the context of the
application. The initial namespace was created by the debugger automatically.
</div>
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
''' + FOOTER
SUMMARY_HTML = u'''\
<div class="%(classes)s">
%(title)s
<ul>%(frames)s</ul>
%(description)s
</div>
'''
FRAME_HTML = u'''\
<div class="frame" id="frame-%(id)d">
<h4>File <cite class="filename">"%(filename)s"</cite>,
line <em class="line">%(lineno)s</em>,
in <code class="function">%(function_name)s</code></h4>
<pre>%(current_line)s</pre>
</div>
'''
SOURCE_TABLE_HTML = u'<table class=source>%s</table>'
SOURCE_LINE_HTML = u'''\
<tr class="%(classes)s">
<td class=lineno>%(lineno)s</td>
<td>%(code)s</td>
</tr>
'''
def render_console_html(secret):
return CONSOLE_HTML % {
'evalex': 'true',
'console': 'true',
'title': 'Console',
'secret': secret,
'traceback_id': -1
}
def get_current_traceback(ignore_system_exceptions=False,
show_hidden_frames=False, skip=0):
"""Get the current exception info as `Traceback` object. Per default
calling this method will reraise system exceptions such as generator exit,
system exit or others. This behavior can be disabled by passing `False`
to the function as first parameter.
"""
exc_type, exc_value, tb = sys.exc_info()
if ignore_system_exceptions and exc_type in system_exceptions:
raise
for x in range_type(skip):
if tb.tb_next is None:
break
tb = tb.tb_next
tb = Traceback(exc_type, exc_value, tb)
if not show_hidden_frames:
tb.filter_hidden_frames()
return tb
class Line(object):
"""Helper for the source renderer."""
__slots__ = ('lineno', 'code', 'in_frame', 'current')
def __init__(self, lineno, code):
self.lineno = lineno
self.code = code
self.in_frame = False
self.current = False
def classes(self):
rv = ['line']
if self.in_frame:
rv.append('in-frame')
if self.current:
rv.append('current')
return rv
classes = property(classes)
def render(self):
return SOURCE_LINE_HTML % {
'classes': u' '.join(self.classes),
'lineno': self.lineno,
'code': escape(self.code)
}
class Traceback(object):
"""Wraps a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.exc_type = exc_type
self.exc_value = exc_value
if not isinstance(exc_type, str):
exception_type = exc_type.__name__
if exc_type.__module__ not in ('__builtin__', 'exceptions'):
exception_type = exc_type.__module__ + '.' + exception_type
else:
exception_type = exc_type
self.exception_type = exception_type
# we only add frames to the list that are not hidden. This follows
        # the magic variables as defined by paste.exceptions.collector
self.frames = []
while tb:
self.frames.append(Frame(exc_type, exc_value, tb))
tb = tb.tb_next
def filter_hidden_frames(self):
"""Remove the frames according to the paste spec."""
if not self.frames:
return
new_frames = []
hidden = False
for frame in self.frames:
hide = frame.hide
if hide in ('before', 'before_and_this'):
new_frames = []
hidden = False
if hide == 'before_and_this':
continue
elif hide in ('reset', 'reset_and_this'):
hidden = False
if hide == 'reset_and_this':
continue
elif hide in ('after', 'after_and_this'):
hidden = True
if hide == 'after_and_this':
continue
elif hide or hidden:
continue
new_frames.append(frame)
# if we only have one frame and that frame is from the codeop
# module, remove it.
if len(new_frames) == 1 and self.frames[0].module == 'codeop':
del self.frames[:]
        # if the last frame is missing something went terribly wrong :(
elif self.frames[-1] in new_frames:
self.frames[:] = new_frames
def is_syntax_error(self):
"""Is it a syntax error?"""
return isinstance(self.exc_value, SyntaxError)
is_syntax_error = property(is_syntax_error)
def exception(self):
"""String representation of the exception."""
buf = traceback.format_exception_only(self.exc_type, self.exc_value)
rv = ''.join(buf).strip()
return rv.decode('utf-8', 'replace') if PY2 else rv
exception = property(exception)
def log(self, logfile=None):
"""Log the ASCII traceback into a file object."""
if logfile is None:
logfile = sys.stderr
tb = self.plaintext.rstrip() + u'\n'
if PY2:
tb.encode('utf-8', 'replace')
logfile.write(tb)
def paste(self):
"""Create a paste and return the paste id."""
data = json.dumps({
'description': 'Werkzeug Internal Server Error',
'public': False,
'files': {
'traceback.txt': {
'content': self.plaintext
}
}
}).encode('utf-8')
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
rv = urlopen('https://api.github.com/gists', data=data)
resp = json.loads(rv.read().decode('utf-8'))
rv.close()
return {
'url': resp['html_url'],
'id': resp['id']
}
def render_summary(self, include_title=True):
"""Render the traceback for the interactive console."""
title = ''
frames = []
classes = ['traceback']
if not self.frames:
classes.append('noframe-traceback')
if include_title:
if self.is_syntax_error:
title = u'Syntax Error'
else:
title = u'Traceback <em>(most recent call last)</em>:'
for frame in self.frames:
frames.append(u'<li%s>%s' % (
frame.info and u' title="%s"' % escape(frame.info) or u'',
frame.render()
))
if self.is_syntax_error:
description_wrapper = u'<pre class=syntaxerror>%s</pre>'
else:
description_wrapper = u'<blockquote>%s</blockquote>'
return SUMMARY_HTML % {
'classes': u' '.join(classes),
'title': title and u'<h3>%s</h3>' % title or u'',
'frames': u'\n'.join(frames),
'description': description_wrapper % escape(self.exception)
}
def render_full(self, evalex=False, secret=None):
"""Render the Full HTML page with the traceback info."""
exc = escape(self.exception)
return PAGE_HTML % {
'evalex': evalex and 'true' or 'false',
'console': 'false',
'title': exc,
'exception': exc,
'exception_type': escape(self.exception_type),
'summary': self.render_summary(include_title=False),
'plaintext': self.plaintext,
'plaintext_cs': re.sub('-{2,}', '-', self.plaintext),
'traceback_id': self.id,
'secret': secret
}
def generate_plaintext_traceback(self):
"""Like the plaintext attribute but returns a generator"""
yield u'Traceback (most recent call last):'
for frame in self.frames:
yield u' File "%s", line %s, in %s' % (
frame.filename,
frame.lineno,
frame.function_name
)
yield u' ' + frame.current_line.strip()
yield self.exception
def plaintext(self):
return u'\n'.join(self.generate_plaintext_traceback())
plaintext = cached_property(plaintext)
id = property(lambda x: id(x))
class Frame(object):
"""A single frame in a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.lineno = tb.tb_lineno
self.function_name = tb.tb_frame.f_code.co_name
self.locals = tb.tb_frame.f_locals
self.globals = tb.tb_frame.f_globals
fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
if fn[-4:] in ('.pyo', '.pyc'):
fn = fn[:-1]
# if it's a file on the file system resolve the real filename.
if os.path.isfile(fn):
fn = os.path.realpath(fn)
self.filename = fn
self.module = self.globals.get('__name__')
self.loader = self.globals.get('__loader__')
self.code = tb.tb_frame.f_code
# support for paste's traceback extensions
self.hide = self.locals.get('__traceback_hide__', False)
info = self.locals.get('__traceback_info__')
if info is not None:
try:
info = text_type(info)
except UnicodeError:
info = str(info).decode('utf-8', 'replace')
self.info = info
def render(self):
"""Render a single frame in a traceback."""
return FRAME_HTML % {
'id': self.id,
'filename': escape(self.filename),
'lineno': self.lineno,
'function_name': escape(self.function_name),
'current_line': escape(self.current_line.strip())
}
def get_annotated_lines(self):
"""Helper function that returns lines with extra information."""
lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
# find function definition and mark lines
if hasattr(self.code, 'co_firstlineno'):
lineno = self.code.co_firstlineno - 1
while lineno > 0:
if _funcdef_re.match(lines[lineno].code):
break
lineno -= 1
try:
offset = len(inspect.getblock([x.code + '\n' for x
in lines[lineno:]]))
except TokenError:
offset = 0
for line in lines[lineno:lineno + offset]:
line.in_frame = True
# mark current line
try:
lines[self.lineno - 1].current = True
except IndexError:
pass
return lines
def render_source(self):
"""Render the sourcecode."""
return SOURCE_TABLE_HTML % u'\n'.join(line.render() for line in
self.get_annotated_lines())
def eval(self, code, mode='single'):
"""Evaluate code in the context of the frame."""
if isinstance(code, string_types):
if PY2 and isinstance(code, unicode):
code = UTF8_COOKIE + code.encode('utf-8')
code = compile(code, '<interactive>', mode)
return eval(code, self.globals, self.locals)
@cached_property
def sourcelines(self):
"""The sourcecode of the file as list of unicode strings."""
# get sourcecode from loader or file
source = None
if self.loader is not None:
try:
if hasattr(self.loader, 'get_source'):
source = self.loader.get_source(self.module)
elif hasattr(self.loader, 'get_source_by_code'):
source = self.loader.get_source_by_code(self.code)
except Exception:
# we munch the exception so that we don't cause troubles
# if the loader is broken.
pass
if source is None:
try:
f = open(self.filename)
except IOError:
return []
try:
source = f.read()
finally:
f.close()
# already unicode? return right away
if isinstance(source, text_type):
return source.splitlines()
# yes. it should be ascii, but we don't want to reject too many
# characters in the debugger if something breaks
charset = 'utf-8'
if source.startswith(UTF8_COOKIE):
source = source[3:]
else:
for idx, match in enumerate(_line_re.finditer(source)):
match = _line_re.search(match.group())
if match is not None:
charset = match.group(1)
break
if idx > 1:
break
# on broken cookies we fall back to utf-8 too
try:
codecs.lookup(charset)
except LookupError:
charset = 'utf-8'
return source.decode(charset, 'replace').splitlines()
@property
def current_line(self):
try:
return self.sourcelines[self.lineno - 1]
except IndexError:
return u''
@cached_property
def console(self):
return Console(self.globals, self.locals)
id = property(lambda x: id(x))
| apache-2.0 |
wangscript/libjingle-1 | trunk/tools/python/google/path_utils.py | 191 | 2910 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Some utility methods for getting and manipulating paths."""
# TODO(pamg): Have the buildbot use these, too.
import errno
import os
import sys
class PathNotFound(Exception): pass
def ScriptDir():
"""Get the full path to the directory containing the current script."""
script_filename = os.path.abspath(sys.argv[0])
return os.path.dirname(script_filename)
def FindAncestor(start_dir, ancestor):
"""Finds an ancestor dir in a path.
For example, FindAncestor('c:\foo\bar\baz', 'bar') would return
'c:\foo\bar'. Unlike FindUpward*, this only looks at direct path ancestors.
"""
start_dir = os.path.abspath(start_dir)
path = start_dir
while True:
(parent, tail) = os.path.split(path)
if tail == ancestor:
return path
if not tail:
break
path = parent
raise PathNotFound("Unable to find ancestor %s in %s" % (ancestor, start_dir))
def FindUpwardParent(start_dir, *desired_list):
"""Finds the desired object's parent, searching upward from the start_dir.
Searches start_dir and all its parents looking for the desired directory
or file, which may be given in one or more path components. Returns the
first directory in which the top desired path component was found, or raises
PathNotFound if it wasn't.
"""
desired_path = os.path.join(*desired_list)
last_dir = ''
cur_dir = start_dir
found_path = os.path.join(cur_dir, desired_path)
while not os.path.exists(found_path):
last_dir = cur_dir
cur_dir = os.path.dirname(cur_dir)
if last_dir == cur_dir:
raise PathNotFound('Unable to find %s above %s' %
(desired_path, start_dir))
found_path = os.path.join(cur_dir, desired_path)
# Strip the entire original desired path from the end of the one found
# and remove a trailing path separator, if present.
found_path = found_path[:len(found_path) - len(desired_path)]
if found_path.endswith(os.sep):
found_path = found_path[:len(found_path) - 1]
return found_path
def FindUpward(start_dir, *desired_list):
"""Returns a path to the desired directory or file, searching upward.
Searches start_dir and all its parents looking for the desired directory
or file, which may be given in one or more path components. Returns the full
path to the desired object, or raises PathNotFound if it wasn't found.
"""
parent = FindUpwardParent(start_dir, *desired_list)
return os.path.join(parent, *desired_list)
def MaybeMakeDirectory(*path):
"""Creates an entire path, if it doesn't already exist."""
file_path = os.path.join(*path)
try:
os.makedirs(file_path)
except OSError, e:
# errno.EEXIST is "File exists". If we see another error, re-raise.
if e.errno != errno.EEXIST:
raise
| bsd-3-clause |
uclouvain/osis | education_group/views/access_requirements/create.py | 1 | 3536 | #
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.conf import settings
from django.contrib.messages.views import SuccessMessageMixin
from django.utils.translation import gettext_lazy as _
from django.views.generic import CreateView
from base.business.education_groups.access_requirements import can_postpone_access_requirements
from base.models.admission_condition import AdmissionConditionLine
from base.models.enums.access_requirements_sections import ConditionSectionsTypes
from base.views.mixins import AjaxTemplateMixin
from education_group.forms.admission_condition import CreateLineEnglishForm, \
CreateLineFrenchForm
from education_group.views.access_requirements.common import AccessRequirementsMixin
from osis_role.contrib.views import PermissionRequiredMixin
class CreateAccessRequirementsLine(SuccessMessageMixin, AccessRequirementsMixin, PermissionRequiredMixin,
AjaxTemplateMixin, CreateView):
template_name = "education_group_app/access_requirements/line_edit.html"
raise_exception = True
force_reload = True
model = AdmissionConditionLine
def get_permission_object(self):
return self.get_admission_condition_object().education_group_year
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["can_postpone"] = can_postpone_access_requirements(
self.get_admission_condition_object().education_group_year
)
context["section"] = ConditionSectionsTypes.get_value(self.request.GET["section"])
return context
def get_initial(self):
initial = super().get_initial()
initial["section"] = self.request.GET["section"]
return initial
def form_valid(self, form):
form.instance.admission_condition = self.get_admission_condition_object()
return super().form_valid(form)
def get_form_class(self):
language = self.request.GET['language']
if language == settings.LANGUAGE_CODE_EN:
return CreateLineEnglishForm
return CreateLineFrenchForm
def get_success_url(self):
return ""
def get_success_message(self, cleaned_data):
if self.request.POST.get('to_postpone'):
return _("Condition has been created (with postpone)")
return _("Condition has been created (without postpone)")
| agpl-3.0 |
dpetzold/django | tests/middleware/tests.py | 13 | 34932 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gzip
import random
import re
from io import BytesIO
from unittest import skipIf
from django.conf import settings
from django.core import mail
from django.core.exceptions import PermissionDenied
from django.http import (
FileResponse, HttpRequest, HttpResponse, HttpResponseNotFound,
HttpResponsePermanentRedirect, HttpResponseRedirect, StreamingHttpResponse,
)
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.middleware.common import (
BrokenLinkEmailsMiddleware, CommonMiddleware,
)
from django.middleware.gzip import GZipMiddleware
from django.middleware.http import ConditionalGetMiddleware
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.utils import six
from django.utils.encoding import force_str
from django.utils.six.moves import range
from django.utils.six.moves.urllib.parse import quote
@override_settings(ROOT_URLCONF='middleware.urls')
class CommonMiddlewareTest(SimpleTestCase):
rf = RequestFactory()
@override_settings(APPEND_SLASH=True)
def test_append_slash_have_slash(self):
"""
URLs with slashes should go unmolested.
"""
request = self.rf.get('/slash/')
self.assertEqual(CommonMiddleware().process_request(request), None)
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_resource(self):
"""
Matches to explicit slashless URLs should go unmolested.
"""
request = self.rf.get('/noslash')
self.assertEqual(CommonMiddleware().process_request(request), None)
response = HttpResponse("Here's the text of the Web page.")
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_unknown(self):
"""
APPEND_SLASH should not redirect to unknown resources.
"""
request = self.rf.get('/unknown')
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect(self):
"""
APPEND_SLASH should redirect slashless URLs to a valid pattern.
"""
request = self.rf.get('/slash')
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, '/slash/')
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect_querystring(self):
"""
APPEND_SLASH should preserve querystrings when redirecting.
"""
request = self.rf.get('/slash?test=1')
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.url, '/slash/?test=1')
@override_settings(APPEND_SLASH=True, DEBUG=True)
def test_append_slash_no_redirect_on_POST_in_DEBUG(self):
"""
Tests that while in debug mode, an exception is raised with a warning
when a failed attempt is made to POST, PUT, or PATCH to an URL which
would normally be redirected to a slashed version.
"""
msg = "maintaining %s data. Change your form to point to testserver/slash/"
request = self.rf.get('/slash')
request.method = 'POST'
response = HttpResponseNotFound()
with six.assertRaisesRegex(self, RuntimeError, msg % request.method):
CommonMiddleware().process_response(request, response)
request = self.rf.get('/slash')
request.method = 'PUT'
with six.assertRaisesRegex(self, RuntimeError, msg % request.method):
CommonMiddleware().process_response(request, response)
request = self.rf.get('/slash')
request.method = 'PATCH'
with six.assertRaisesRegex(self, RuntimeError, msg % request.method):
CommonMiddleware().process_response(request, response)
@override_settings(APPEND_SLASH=False)
def test_append_slash_disabled(self):
"""
Disabling append slash functionality should leave slashless URLs alone.
"""
request = self.rf.get('/slash')
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_quoted(self):
"""
URLs which require quoting should be redirected to their slash version ok.
"""
request = self.rf.get(quote('/needsquoting#'))
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.status_code, 301)
self.assertEqual(
r.url,
'/needsquoting%23/')
@override_settings(APPEND_SLASH=False, PREPEND_WWW=True)
def test_prepend_www(self):
request = self.rf.get('/path/')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(
r.url,
'http://www.testserver/path/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_have_slash(self):
request = self.rf.get('/slash/')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url,
'http://www.testserver/slash/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_slashless(self):
request = self.rf.get('/slash')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url,
'http://www.testserver/slash/')
# The following tests examine expected behavior given a custom URLconf that
# overrides the default one through the request object.
@override_settings(APPEND_SLASH=True)
def test_append_slash_have_slash_custom_urlconf(self):
"""
URLs with slashes should go unmolested.
"""
request = self.rf.get('/customurlconf/slash/')
request.urlconf = 'middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_resource_custom_urlconf(self):
"""
Matches to explicit slashless URLs should go unmolested.
"""
request = self.rf.get('/customurlconf/noslash')
request.urlconf = 'middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
response = HttpResponse("Here's the text of the Web page.")
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_unknown_custom_urlconf(self):
"""
APPEND_SLASH should not redirect to unknown resources.
"""
request = self.rf.get('/customurlconf/unknown')
request.urlconf = 'middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect_custom_urlconf(self):
"""
APPEND_SLASH should redirect slashless URLs to a valid pattern.
"""
request = self.rf.get('/customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertIsNotNone(r,
"CommonMiddlware failed to return APPEND_SLASH redirect using request.urlconf")
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, '/customurlconf/slash/')
@override_settings(APPEND_SLASH=True, DEBUG=True)
def test_append_slash_no_redirect_on_POST_in_DEBUG_custom_urlconf(self):
"""
Tests that while in debug mode, an exception is raised with a warning
when a failed attempt is made to POST to an URL which would normally be
redirected to a slashed version.
"""
request = self.rf.get('/customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
request.method = 'POST'
response = HttpResponseNotFound()
with six.assertRaisesRegex(self, RuntimeError, 'end in a slash'):
CommonMiddleware().process_response(request, response)
@override_settings(APPEND_SLASH=False)
def test_append_slash_disabled_custom_urlconf(self):
"""
Disabling append slash functionality should leave slashless URLs alone.
"""
request = self.rf.get('/customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_quoted_custom_urlconf(self):
"""
URLs which require quoting should be redirected to their slash version ok.
"""
request = self.rf.get(quote('/customurlconf/needsquoting#'))
request.urlconf = 'middleware.extra_urls'
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertIsNotNone(r,
"CommonMiddlware failed to return APPEND_SLASH redirect using request.urlconf")
self.assertEqual(r.status_code, 301)
self.assertEqual(
r.url,
'/customurlconf/needsquoting%23/')
@override_settings(APPEND_SLASH=False, PREPEND_WWW=True)
def test_prepend_www_custom_urlconf(self):
request = self.rf.get('/customurlconf/path/')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(
r.url,
'http://www.testserver/customurlconf/path/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_have_slash_custom_urlconf(self):
request = self.rf.get('/customurlconf/slash/')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url,
'http://www.testserver/customurlconf/slash/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_slashless_custom_urlconf(self):
request = self.rf.get('/customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url,
'http://www.testserver/customurlconf/slash/')
# Other tests
@override_settings(DISALLOWED_USER_AGENTS=[re.compile(r'foo')])
def test_disallowed_user_agents(self):
request = self.rf.get('/slash')
request.META['HTTP_USER_AGENT'] = 'foo'
with self.assertRaisesMessage(PermissionDenied, 'Forbidden user agent'):
CommonMiddleware().process_request(request)
def test_non_ascii_query_string_does_not_crash(self):
"""Regression test for #15152"""
request = self.rf.get('/slash')
request.META['QUERY_STRING'] = force_str('drink=café')
r = CommonMiddleware().process_request(request)
self.assertIsNone(r)
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.status_code, 301)
def test_response_redirect_class(self):
request = self.rf.get('/slash')
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, '/slash/')
self.assertIsInstance(r, HttpResponsePermanentRedirect)
def test_response_redirect_class_subclass(self):
class MyCommonMiddleware(CommonMiddleware):
response_redirect_class = HttpResponseRedirect
request = self.rf.get('/slash')
response = HttpResponseNotFound()
r = MyCommonMiddleware().process_response(request, response)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.url, '/slash/')
self.assertIsInstance(r, HttpResponseRedirect)
@override_settings(
IGNORABLE_404_URLS=[re.compile(r'foo')],
MANAGERS=['[email protected]'],
)
class BrokenLinkEmailsMiddlewareTest(SimpleTestCase):
rf = RequestFactory()
def setUp(self):
self.req = self.rf.get('/regular_url/that/does/not/exist')
self.resp = self.client.get(self.req.path)
def test_404_error_reporting(self):
self.req.META['HTTP_REFERER'] = '/another/url/'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Broken', mail.outbox[0].subject)
def test_404_error_reporting_no_referer(self):
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
def test_404_error_reporting_ignored_url(self):
self.req.path = self.req.path_info = 'foo_url/that/does/not/exist'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
@skipIf(six.PY3, "HTTP_REFERER is str type on Python 3")
def test_404_error_nonascii_referrer(self):
        # Such referer strings should not happen, but if one does, don't crash.
self.req.META['HTTP_REFERER'] = b'http://testserver/c/\xd0\xbb\xd0\xb8/'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
@skipIf(six.PY3, "HTTP_USER_AGENT is str type on Python 3")
def test_404_error_nonascii_user_agent(self):
        # Such user agent strings should not happen, but if one does, don't crash.
self.req.META['HTTP_REFERER'] = '/another/url/'
self.req.META['HTTP_USER_AGENT'] = b'\xd0\xbb\xd0\xb8\xff\xff'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('User agent: \u043b\u0438\ufffd\ufffd\n', mail.outbox[0].body)
def test_custom_request_checker(self):
class SubclassedMiddleware(BrokenLinkEmailsMiddleware):
ignored_user_agent_patterns = (re.compile(r'Spider.*'),
re.compile(r'Robot.*'))
def is_ignorable_request(self, request, uri, domain, referer):
'''Check user-agent in addition to normal checks.'''
if super(SubclassedMiddleware, self).is_ignorable_request(request, uri, domain, referer):
return True
user_agent = request.META['HTTP_USER_AGENT']
return any(pattern.search(user_agent) for pattern in
self.ignored_user_agent_patterns)
self.req.META['HTTP_REFERER'] = '/another/url/'
self.req.META['HTTP_USER_AGENT'] = 'Spider machine 3.4'
SubclassedMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
self.req.META['HTTP_USER_AGENT'] = 'My user agent'
SubclassedMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
def test_referer_equal_to_requested_url(self):
"""
Some bots set the referer to the current URL to avoid being blocked by
        a referer check (#25302).
"""
self.req.META['HTTP_REFERER'] = self.req.path
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
# URL with scheme and domain should also be ignored
self.req.META['HTTP_REFERER'] = 'http://testserver%s' % self.req.path
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
def test_referer_equal_to_requested_url_on_another_domain(self):
self.req.META['HTTP_REFERER'] = 'http://anotherserver%s' % self.req.path
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
@override_settings(ROOT_URLCONF='middleware.cond_get_urls')
class ConditionalGetMiddlewareTest(SimpleTestCase):
def setUp(self):
self.req = RequestFactory().get('/')
self.resp = self.client.get(self.req.path_info)
# Tests for the Date header
def test_date_header_added(self):
self.assertNotIn('Date', self.resp)
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertIn('Date', self.resp)
# Tests for the Content-Length header
def test_content_length_header_added(self):
content_length = len(self.resp.content)
self.assertNotIn('Content-Length', self.resp)
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertIn('Content-Length', self.resp)
self.assertEqual(int(self.resp['Content-Length']), content_length)
def test_content_length_header_not_added(self):
resp = StreamingHttpResponse('content')
self.assertNotIn('Content-Length', resp)
resp = ConditionalGetMiddleware().process_response(self.req, resp)
self.assertNotIn('Content-Length', resp)
def test_content_length_header_not_changed(self):
bad_content_length = len(self.resp.content) + 10
self.resp['Content-Length'] = bad_content_length
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(int(self.resp['Content-Length']), bad_content_length)
# Tests for the ETag header
def test_if_none_match_and_no_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = 'spam'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_no_if_none_match_and_etag(self):
self.resp['ETag'] = 'eggs'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_none_match_and_same_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = self.resp['ETag'] = 'spam'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_none_match_and_different_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = 'spam'
self.resp['ETag'] = 'eggs'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_none_match_and_redirect(self):
self.req.META['HTTP_IF_NONE_MATCH'] = self.resp['ETag'] = 'spam'
self.resp['Location'] = '/'
self.resp.status_code = 301
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 301)
def test_if_none_match_and_client_error(self):
self.req.META['HTTP_IF_NONE_MATCH'] = self.resp['ETag'] = 'spam'
self.resp.status_code = 400
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 400)
@override_settings(USE_ETAGS=True)
def test_etag(self):
req = HttpRequest()
res = HttpResponse('content')
self.assertTrue(
CommonMiddleware().process_response(req, res).has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_etag_streaming_response(self):
req = HttpRequest()
res = StreamingHttpResponse(['content'])
res['ETag'] = 'tomatoes'
self.assertEqual(
CommonMiddleware().process_response(req, res).get('ETag'),
'tomatoes')
@override_settings(USE_ETAGS=True)
def test_no_etag_streaming_response(self):
req = HttpRequest()
res = StreamingHttpResponse(['content'])
self.assertFalse(
CommonMiddleware().process_response(req, res).has_header('ETag'))
# Tests for the Last-Modified header
def test_if_modified_since_and_no_last_modified(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_no_if_modified_since_and_last_modified(self):
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_modified_since_and_same_last_modified(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_modified_since_and_last_modified_in_the_past(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_modified_since_and_last_modified_in_the_future(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:41:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_modified_since_and_redirect(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT'
self.resp['Location'] = '/'
self.resp.status_code = 301
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 301)
def test_if_modified_since_and_client_error(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT'
self.resp.status_code = 400
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 400)
class XFrameOptionsMiddlewareTest(SimpleTestCase):
"""
Tests for the X-Frame-Options clickjacking prevention middleware.
"""
def test_same_origin(self):
"""
Tests that the X_FRAME_OPTIONS setting can be set to SAMEORIGIN to
have the middleware use that value for the HTTP header.
"""
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
with override_settings(X_FRAME_OPTIONS='sameorigin'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_deny(self):
"""
Tests that the X_FRAME_OPTIONS setting can be set to DENY to
have the middleware use that value for the HTTP header.
"""
with override_settings(X_FRAME_OPTIONS='DENY'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
with override_settings(X_FRAME_OPTIONS='deny'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_defaults_sameorigin(self):
"""
Tests that if the X_FRAME_OPTIONS setting is not set then it defaults
to SAMEORIGIN.
"""
with override_settings(X_FRAME_OPTIONS=None):
del settings.X_FRAME_OPTIONS # restored by override_settings
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_dont_set_if_set(self):
"""
Tests that if the X-Frame-Options header is already set then the
middleware does not attempt to override it.
"""
with override_settings(X_FRAME_OPTIONS='DENY'):
response = HttpResponse()
response['X-Frame-Options'] = 'SAMEORIGIN'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
response = HttpResponse()
response['X-Frame-Options'] = 'DENY'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_response_exempt(self):
"""
Tests that if the response has a xframe_options_exempt attribute set
to False then it still sets the header, but if it's set to True then
it does not.
"""
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
response = HttpResponse()
response.xframe_options_exempt = False
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
response = HttpResponse()
response.xframe_options_exempt = True
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r.get('X-Frame-Options', None), None)
def test_is_extendable(self):
"""
Tests that the XFrameOptionsMiddleware method that determines the
X-Frame-Options header value can be overridden based on something in
the request or response.
"""
class OtherXFrameOptionsMiddleware(XFrameOptionsMiddleware):
# This is just an example for testing purposes...
def get_xframe_options_value(self, request, response):
if getattr(request, 'sameorigin', False):
return 'SAMEORIGIN'
if getattr(response, 'sameorigin', False):
return 'SAMEORIGIN'
return 'DENY'
with override_settings(X_FRAME_OPTIONS='DENY'):
response = HttpResponse()
response.sameorigin = True
r = OtherXFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
request = HttpRequest()
request.sameorigin = True
r = OtherXFrameOptionsMiddleware().process_response(request,
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
r = OtherXFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
class GZipMiddlewareTest(SimpleTestCase):
"""
Tests the GZip middleware.
"""
short_string = b"This string is too short to be worth compressing."
compressible_string = b'a' * 500
uncompressible_string = b''.join(six.int2byte(random.randint(0, 255)) for _ in range(500))
sequence = [b'a' * 500, b'b' * 200, b'a' * 300]
sequence_unicode = ['a' * 500, 'é' * 200, 'a' * 300]
def setUp(self):
self.req = RequestFactory().get('/')
self.req.META['HTTP_ACCEPT_ENCODING'] = 'gzip, deflate'
self.req.META['HTTP_USER_AGENT'] = 'Mozilla/5.0 (Windows NT 5.1; rv:9.0.1) Gecko/20100101 Firefox/9.0.1'
self.resp = HttpResponse()
self.resp.status_code = 200
self.resp.content = self.compressible_string
self.resp['Content-Type'] = 'text/html; charset=UTF-8'
self.stream_resp = StreamingHttpResponse(self.sequence)
self.stream_resp['Content-Type'] = 'text/html; charset=UTF-8'
self.stream_resp_unicode = StreamingHttpResponse(self.sequence_unicode)
self.stream_resp_unicode['Content-Type'] = 'text/html; charset=UTF-8'
@staticmethod
def decompress(gzipped_string):
with gzip.GzipFile(mode='rb', fileobj=BytesIO(gzipped_string)) as f:
return f.read()
def test_compress_response(self):
"""
Tests that compression is performed on responses with compressible content.
"""
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.decompress(r.content), self.compressible_string)
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertEqual(r.get('Content-Length'), str(len(r.content)))
def test_compress_streaming_response(self):
"""
Tests that compression is performed on responses with streaming content.
"""
r = GZipMiddleware().process_response(self.req, self.stream_resp)
self.assertEqual(self.decompress(b''.join(r)), b''.join(self.sequence))
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertFalse(r.has_header('Content-Length'))
def test_compress_streaming_response_unicode(self):
"""
Tests that compression is performed on responses with streaming Unicode content.
"""
r = GZipMiddleware().process_response(self.req, self.stream_resp_unicode)
self.assertEqual(self.decompress(b''.join(r)), b''.join(x.encode('utf-8') for x in self.sequence_unicode))
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertFalse(r.has_header('Content-Length'))
def test_compress_file_response(self):
"""
Tests that compression is performed on FileResponse.
"""
open_file = lambda: open(__file__, 'rb')
with open_file() as file1:
file_resp = FileResponse(file1)
file_resp['Content-Type'] = 'text/html; charset=UTF-8'
r = GZipMiddleware().process_response(self.req, file_resp)
with open_file() as file2:
self.assertEqual(self.decompress(b''.join(r)), file2.read())
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertIsNot(r.file_to_stream, file1)
def test_compress_non_200_response(self):
"""
Tests that compression is performed on responses with a status other than 200.
See #10762.
"""
self.resp.status_code = 404
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.decompress(r.content), self.compressible_string)
self.assertEqual(r.get('Content-Encoding'), 'gzip')
def test_no_compress_short_response(self):
"""
Tests that compression isn't performed on responses with short content.
"""
self.resp.content = self.short_string
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(r.content, self.short_string)
self.assertEqual(r.get('Content-Encoding'), None)
def test_no_compress_compressed_response(self):
"""
Tests that compression isn't performed on responses that are already compressed.
"""
self.resp['Content-Encoding'] = 'deflate'
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(r.content, self.compressible_string)
self.assertEqual(r.get('Content-Encoding'), 'deflate')
def test_no_compress_uncompressible_response(self):
"""
Tests that compression isn't performed on responses with uncompressible content.
"""
self.resp.content = self.uncompressible_string
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(r.content, self.uncompressible_string)
self.assertEqual(r.get('Content-Encoding'), None)
@override_settings(USE_ETAGS=True)
class ETagGZipMiddlewareTest(SimpleTestCase):
"""
Tests if the ETag middleware behaves correctly with GZip middleware.
"""
rf = RequestFactory()
compressible_string = b'a' * 500
def test_compress_response(self):
"""
Tests that ETag is changed after gzip compression is performed.
"""
request = self.rf.get('/', HTTP_ACCEPT_ENCODING='gzip, deflate')
response = GZipMiddleware().process_response(request,
CommonMiddleware().process_response(request,
HttpResponse(self.compressible_string)))
gzip_etag = response.get('ETag')
request = self.rf.get('/', HTTP_ACCEPT_ENCODING='')
response = GZipMiddleware().process_response(request,
CommonMiddleware().process_response(request,
HttpResponse(self.compressible_string)))
nogzip_etag = response.get('ETag')
self.assertNotEqual(gzip_etag, nogzip_etag)
| bsd-3-clause |
leviroth/praw | praw/models/reddit/subreddit.py | 1 | 101559 | """Provide the Subreddit class."""
# pylint: disable=too-many-lines
from copy import deepcopy
from json import dumps, loads
from os.path import basename, dirname, join
from urllib.parse import urljoin
from prawcore import Redirect
import websocket
from ...const import API_PATH, JPEG_HEADER
from ...exceptions import APIException, ClientException
from ...util.cache import cachedproperty
from ..util import permissions_string, stream_generator
from ..listing.generator import ListingGenerator
from ..listing.mixins import SubredditListingMixin
from .base import RedditBase
from .emoji import SubredditEmoji
from .mixins import FullnameMixin, MessageableMixin
from .modmail import ModmailConversation
from .widgets import SubredditWidgets
from .wikipage import WikiPage
class Subreddit(
MessageableMixin, SubredditListingMixin, FullnameMixin, RedditBase
):
"""A class for Subreddits.
To obtain an instance of this class for subreddit ``/r/redditdev`` execute:
.. code:: python
subreddit = reddit.subreddit('redditdev')
While ``/r/all`` is not a real subreddit, it can still be treated like
one. The following outputs the titles of the 25 hottest submissions in
``/r/all``:
.. code:: python
for submission in reddit.subreddit('all').hot(limit=25):
print(submission.title)
Multiple subreddits can be combined like so:
.. code:: python
for submission in reddit.subreddit('redditdev+learnpython').top('all'):
print(submission)
Subreddits can be filtered from combined listings as follows. Note that
these filters are ignored by certain methods, including
:attr:`~praw.models.Subreddit.comments`,
:meth:`~praw.models.Subreddit.gilded`, and
:meth:`.SubredditStream.comments`.
.. code:: python
for submission in reddit.subreddit('all-redditdev').new():
print(submission)
**Typical Attributes**
This table describes attributes that typically belong to objects of this
class. Since attributes are dynamically provided (see
:ref:`determine-available-attributes-of-an-object`), there is not a
guarantee that these attributes will always be present, nor is this list
comprehensive in any way.
========================== ===============================================
Attribute Description
========================== ===============================================
``can_assign_link_flair`` Whether users can assign their own link flair.
``can_assign_user_flair`` Whether users can assign their own user flair.
``created_utc`` Time the subreddit was created, represented in
`Unix Time`_.
``description`` Subreddit description, in Markdown.
``description_html`` Subreddit description, in HTML.
``display_name`` Name of the subreddit.
``id`` ID of the subreddit.
``name`` Fullname of the subreddit.
``over18`` Whether the subreddit is NSFW.
``public_description`` Description of the subreddit, shown in searches
and on the "You must be invited to visit this
community" page (if applicable).
``spoilers_enabled`` Whether the spoiler tag feature is enabled.
``subscribers`` Count of subscribers.
``user_is_banned`` Whether the authenticated user is banned.
``user_is_moderator`` Whether the authenticated user is a moderator.
``user_is_subscriber`` Whether the authenticated user is subscribed.
========================== ===============================================
.. _Unix Time: https://en.wikipedia.org/wiki/Unix_time
"""
# pylint: disable=too-many-public-methods
STR_FIELD = "display_name"
MESSAGE_PREFIX = "#"
@staticmethod
def _create_or_update(
_reddit,
allow_images=None,
allow_post_crossposts=None,
allow_top=None,
collapse_deleted_comments=None,
comment_score_hide_mins=None,
description=None,
domain=None,
exclude_banned_modqueue=None,
header_hover_text=None,
hide_ads=None,
lang=None,
key_color=None,
link_type=None,
name=None,
over_18=None,
public_description=None,
public_traffic=None,
show_media=None,
show_media_preview=None,
spam_comments=None,
spam_links=None,
spam_selfposts=None,
spoilers_enabled=None,
sr=None,
submit_link_label=None,
submit_text=None,
submit_text_label=None,
subreddit_type=None,
suggested_comment_sort=None,
title=None,
wiki_edit_age=None,
wiki_edit_karma=None,
wikimode=None,
**other_settings
):
# pylint: disable=invalid-name,too-many-locals,too-many-arguments
model = {
"allow_images": allow_images,
"allow_post_crossposts": allow_post_crossposts,
"allow_top": allow_top,
"collapse_deleted_comments": collapse_deleted_comments,
"comment_score_hide_mins": comment_score_hide_mins,
"description": description,
"domain": domain,
"exclude_banned_modqueue": exclude_banned_modqueue,
"header-title": header_hover_text, # Remap here - better name
"hide_ads": hide_ads,
"key_color": key_color,
"lang": lang,
"link_type": link_type,
"name": name,
"over_18": over_18,
"public_description": public_description,
"public_traffic": public_traffic,
"show_media": show_media,
"show_media_preview": show_media_preview,
"spam_comments": spam_comments,
"spam_links": spam_links,
"spam_selfposts": spam_selfposts,
"spoilers_enabled": spoilers_enabled,
"sr": sr,
"submit_link_label": submit_link_label,
"submit_text": submit_text,
"submit_text_label": submit_text_label,
"suggested_comment_sort": suggested_comment_sort,
"title": title,
"type": subreddit_type,
"wiki_edit_age": wiki_edit_age,
"wiki_edit_karma": wiki_edit_karma,
"wikimode": wikimode,
}
model.update(other_settings)
_reddit.post(API_PATH["site_admin"], data=model)
@staticmethod
def _subreddit_list(subreddit, other_subreddits):
if other_subreddits:
return ",".join(
[str(subreddit)] + [str(x) for x in other_subreddits]
)
return str(subreddit)
@property
def _kind(self):
"""Return the class's kind."""
return self._reddit.config.kinds["subreddit"]
@cachedproperty
def banned(self):
"""Provide an instance of :class:`.SubredditRelationship`.
For example to ban a user try:
.. code-block:: python
reddit.subreddit('SUBREDDIT').banned.add('NAME', ban_reason='...')
To list the banned users along with any notes, try:
.. code-block:: python
for ban in reddit.subreddit('SUBREDDIT').banned():
print('{}: {}'.format(ban, ban.note))
"""
return SubredditRelationship(self, "banned")
@cachedproperty
def collections(self):
r"""Provide an instance of :class:`.SubredditCollections`.
To see the permalinks of all :class:`.Collection`\ s that belong to
a subreddit, try:
.. code-block:: python
for collection in reddit.subreddit('SUBREDDIT').collections:
print(collection.permalink)
To get a specific :class:`.Collection` by its UUID or permalink,
use one of the following:
.. code-block:: python
collection = reddit.subreddit('SUBREDDIT').collections('some_uuid')
collection = reddit.subreddit('SUBREDDIT').collections(
permalink='https://reddit.com/r/SUBREDDIT/collection/some_uuid')
"""
return self._subreddit_collections_class(self._reddit, self)
@cachedproperty
def contributor(self):
"""Provide an instance of :class:`.ContributorRelationship`.
Contributors are also known as approved submitters.
To add a contributor try:
.. code-block:: python
reddit.subreddit('SUBREDDIT').contributor.add('NAME')
"""
return ContributorRelationship(self, "contributor")
@cachedproperty
def emoji(self):
"""Provide an instance of :class:`.SubredditEmoji`.
This attribute can be used to discover all emoji for a subreddit:
.. code:: python
for emoji in reddit.subreddit('iama').emoji:
print(emoji)
A single emoji can be lazily retrieved via:
.. code:: python
reddit.subreddit('blah').emoji['emoji_name']
        .. note:: Attempting to access attributes of a nonexistent emoji will
result in a :class:`.ClientException`.
"""
return SubredditEmoji(self)
@cachedproperty
def filters(self):
"""Provide an instance of :class:`.SubredditFilters`."""
return SubredditFilters(self)
@cachedproperty
def flair(self):
"""Provide an instance of :class:`.SubredditFlair`.
Use this attribute for interacting with a subreddit's flair. For
example to list all the flair for a subreddit which you have the
``flair`` moderator permission on try:
.. code-block:: python
for flair in reddit.subreddit('NAME').flair():
print(flair)
Flair templates can be interacted with through this attribute via:
.. code-block:: python
for template in reddit.subreddit('NAME').flair.templates:
print(template)
"""
return SubredditFlair(self)
@cachedproperty
def mod(self):
"""Provide an instance of :class:`.SubredditModeration`."""
return SubredditModeration(self)
@cachedproperty
def moderator(self):
"""Provide an instance of :class:`.ModeratorRelationship`.
For example to add a moderator try:
.. code-block:: python
reddit.subreddit('SUBREDDIT').moderator.add('NAME')
To list the moderators along with their permissions try:
.. code-block:: python
for moderator in reddit.subreddit('SUBREDDIT').moderator():
print('{}: {}'.format(moderator, moderator.mod_permissions))
"""
return ModeratorRelationship(self, "moderator")
@cachedproperty
def modmail(self):
"""Provide an instance of :class:`.Modmail`."""
return Modmail(self)
@cachedproperty
def muted(self):
"""Provide an instance of :class:`.SubredditRelationship`."""
return SubredditRelationship(self, "muted")
@cachedproperty
def quaran(self):
"""Provide an instance of :class:`.SubredditQuarantine`.
This property is named ``quaran`` because ``quarantine`` is a
Subreddit attribute returned by Reddit to indicate whether or not a
Subreddit is quarantined.
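        As a minimal usage sketch, opting in to a quarantined subreddit might
        look like the following (``opt_in`` is assumed to be provided by
        :class:`.SubredditQuarantine`; it is not shown in this excerpt):
        .. code:: python
            reddit.subreddit('SUBREDDIT').quaran.opt_in()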
"""
return SubredditQuarantine(self)
@cachedproperty
def stream(self):
"""Provide an instance of :class:`.SubredditStream`.
Streams can be used to indefinitely retrieve new comments made to a
subreddit, like:
.. code:: python
for comment in reddit.subreddit('iama').stream.comments():
print(comment)
Additionally, new submissions can be retrieved via the stream. In the
following example all submissions are fetched via the special subreddit
``all``:
.. code:: python
for submission in reddit.subreddit('all').stream.submissions():
print(submission)
"""
return SubredditStream(self)
@cachedproperty
def stylesheet(self):
"""Provide an instance of :class:`.SubredditStylesheet`."""
return SubredditStylesheet(self)
@cachedproperty
def widgets(self):
"""Provide an instance of :class:`.SubredditWidgets`.
**Example usage**
Get all sidebar widgets:
.. code-block:: python
for widget in reddit.subreddit('redditdev').widgets.sidebar:
print(widget)
Get ID card widget:
.. code-block:: python
print(reddit.subreddit('redditdev').widgets.id_card)
"""
return SubredditWidgets(self)
@cachedproperty
def wiki(self):
"""Provide an instance of :class:`.SubredditWiki`.
This attribute can be used to discover all wikipages for a subreddit:
.. code:: python
for wikipage in reddit.subreddit('iama').wiki:
print(wikipage)
To fetch the content for a given wikipage try:
.. code:: python
wikipage = reddit.subreddit('iama').wiki['proof']
print(wikipage.content_md)
"""
return SubredditWiki(self)
def __init__(self, reddit, display_name=None, _data=None):
"""Initialize a Subreddit instance.
:param reddit: An instance of :class:`~.Reddit`.
:param display_name: The name of the subreddit.
.. note:: This class should not be initialized directly. Instead obtain
an instance via: ``reddit.subreddit('subreddit_name')``
"""
if bool(display_name) == bool(_data):
raise TypeError(
"Either `display_name` or `_data` must be provided."
)
super(Subreddit, self).__init__(reddit, _data=_data)
if display_name:
self.display_name = display_name
self._path = API_PATH["subreddit"].format(subreddit=self)
def _fetch_info(self):
return ("subreddit_about", {"subreddit": self}, None)
def _fetch_data(self):
name, fields, params = self._fetch_info()
path = API_PATH[name].format(**fields)
return self._reddit.request("GET", path, params)
def _fetch(self):
data = self._fetch_data()
data = data["data"]
other = type(self)(self._reddit, _data=data)
self.__dict__.update(other.__dict__)
self._fetched = True
def _submit_media(self, data, timeout):
"""Submit and return an `image`, `video`, or `videogif`.
This is a helper method for submitting posts that are not link posts or
self posts.
"""
response = self._reddit.post(API_PATH["submit"], data=data)
# About the websockets:
#
# Reddit responds to this request with only two fields: a link to
# the user's /submitted page, and a websocket URL. We can use the
# websocket URL to get a link to the new post once it is created.
#
# An important note to PRAW contributors or anyone who would
# wish to step through this section with a debugger: This block
# of code is NOT debugger-friendly. If there is *any*
# significant time between the POST request just above this
# comment and the creation of the websocket connection just
# below, the code will become stuck in an infinite loop at the
# socket.recv() call. I believe this is because only one message is
# sent over the websocket, and if the client doesn't connect
# soon enough, it will miss the message and get stuck forever
# waiting for another.
#
# So if you need to debug this section of code, please let the
# websocket creation happen right after the POST request,
# otherwise you will have trouble.
if not isinstance(response, dict):
raise ClientException(
"Something went wrong with your post: {!r}".format(response)
)
try:
socket = websocket.create_connection(
response["json"]["data"]["websocket_url"], timeout=timeout
)
ws_update = loads(socket.recv())
socket.close()
except websocket.WebSocketTimeoutException:
raise ClientException(
"Websocket error. Check your media file. "
"Your post may still have been created."
)
url = ws_update["payload"]["redirect"]
return self._reddit.submission(url=url)
def _upload_media(self, media_path):
"""Upload media and return its URL. Uses undocumented endpoint."""
if media_path is None:
media_path = join(
dirname(dirname(dirname(__file__))), "images", "PRAW logo.png"
)
file_name = basename(media_path).lower()
mime_type = {
"png": "image/png",
"mov": "video/quicktime",
"mp4": "video/mp4",
"jpg": "image/jpeg",
"jpeg": "image/jpeg",
"gif": "image/gif",
}.get(
file_name.rpartition(".")[2], "image/jpeg"
) # default to JPEG
img_data = {"filepath": file_name, "mimetype": mime_type}
url = API_PATH["media_asset"]
# until we learn otherwise, assume this request always succeeds
upload_lease = self._reddit.post(url, data=img_data)["args"]
upload_url = "https:{}".format(upload_lease["action"])
upload_data = {
item["name"]: item["value"] for item in upload_lease["fields"]
}
with open(media_path, "rb") as media:
response = self._reddit._core._requestor._http.post(
upload_url, data=upload_data, files={"file": media}
)
response.raise_for_status()
return upload_url + "/" + upload_data["key"]
def random(self):
"""Return a random Submission.
Returns ``None`` on subreddits that do not support the random feature.
One example, at the time of writing, is /r/wallpapers.
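        For example, one way to fetch a random submission and guard against
        the ``None`` case might be:
        .. code:: python
            submission = reddit.subreddit('AskReddit').random()
            if submission is not None:
                print(submission.title)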
"""
url = API_PATH["subreddit_random"].format(subreddit=self)
try:
self._reddit.get(url, params={"unique": self._reddit._next_unique})
except Redirect as redirect:
path = redirect.path
try:
return self._submission_class(
self._reddit, url=urljoin(self._reddit.config.reddit_url, path)
)
except ClientException:
return None
def rules(self):
"""Return rules for the subreddit.
For example to show the rules of ``/r/redditdev`` try:
.. code:: python
reddit.subreddit('redditdev').rules()
"""
return self._reddit.get(API_PATH["rules"].format(subreddit=self))
def search(
self,
query,
sort="relevance",
syntax="lucene",
time_filter="all",
**generator_kwargs
):
"""Return a ListingGenerator for items that match ``query``.
:param query: The query string to search for.
:param sort: Can be one of: relevance, hot, top, new,
comments. (default: relevance).
:param syntax: Can be one of: cloudsearch, lucene, plain
(default: lucene).
:param time_filter: Can be one of: all, day, hour, month, week, year
(default: all).
For more information on building a search query see:
https://www.reddit.com/wiki/search
For example to search all subreddits for ``praw`` try:
.. code:: python
for submission in reddit.subreddit('all').search('praw'):
print(submission.title)
"""
self._validate_time_filter(time_filter)
not_all = self.display_name.lower() != "all"
self._safely_add_arguments(
generator_kwargs,
"params",
q=query,
restrict_sr=not_all,
sort=sort,
syntax=syntax,
t=time_filter,
)
url = API_PATH["search"].format(subreddit=self)
return ListingGenerator(self._reddit, url, **generator_kwargs)
def sticky(self, number=1):
"""Return a Submission object for a sticky of the subreddit.
:param number: Specify which sticky to return. 1 appears at the top
(default: 1).
Raises ``prawcore.NotFound`` if the sticky does not exist.
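        For example, to fetch the second sticky of ``/r/NAME`` one might try:
        .. code:: python
            submission = reddit.subreddit('NAME').sticky(number=2)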
"""
url = API_PATH["about_sticky"].format(subreddit=self)
try:
self._reddit.get(url, params={"num": number})
except Redirect as redirect:
path = redirect.path
return self._submission_class(
self._reddit, url=urljoin(self._reddit.config.reddit_url, path)
)
def submit(
self,
title,
selftext=None,
url=None,
flair_id=None,
flair_text=None,
resubmit=True,
send_replies=True,
nsfw=False,
spoiler=False,
collection_id=None,
):
"""Add a submission to the subreddit.
:param title: The title of the submission.
:param selftext: The markdown formatted content for a ``text``
submission. Use an empty string, ``''``, to make a title-only
submission.
:param url: The URL for a ``link`` submission.
:param collection_id: The UUID of a :class:`.Collection` to add the
newly-submitted post to.
:param flair_id: The flair template to select (default: None).
:param flair_text: If the template's ``flair_text_editable`` value is
True, this value will set a custom text (default: None).
:param resubmit: When False, an error will occur if the URL has already
been submitted (default: True).
:param send_replies: When True, messages will be sent to the submission
author when comments are made to the submission (default: True).
:param nsfw: Whether or not the submission should be marked NSFW
(default: False).
:param spoiler: Whether or not the submission should be marked as
a spoiler (default: False).
:returns: A :class:`~.Submission` object for the newly created
submission.
Either ``selftext`` or ``url`` can be provided, but not both.
For example to submit a URL to ``/r/reddit_api_test`` do:
.. code:: python
title = 'PRAW documentation'
url = 'https://praw.readthedocs.io'
reddit.subreddit('reddit_api_test').submit(title, url=url)
.. note ::
For submitting images, videos, and videogifs,
see :meth:`.submit_image` and :meth:`.submit_video`.
"""
if (bool(selftext) or selftext == "") == bool(url):
raise TypeError("Either `selftext` or `url` must be provided.")
data = {
"sr": str(self),
"resubmit": bool(resubmit),
"sendreplies": bool(send_replies),
"title": title,
"nsfw": bool(nsfw),
"spoiler": bool(spoiler),
}
for key, value in (
("flair_id", flair_id),
("flair_text", flair_text),
("collection_id", collection_id),
):
if value is not None:
data[key] = value
if selftext is not None:
data.update(kind="self", text=selftext)
else:
data.update(kind="link", url=url)
return self._reddit.post(API_PATH["submit"], data=data)
def submit_image(
self,
title,
image_path,
flair_id=None,
flair_text=None,
resubmit=True,
send_replies=True,
nsfw=False,
spoiler=False,
timeout=10,
collection_id=None,
):
"""Add an image submission to the subreddit.
:param title: The title of the submission.
:param image_path: The path to an image, to upload and post.
:param collection_id: The UUID of a :class:`.Collection` to add the
newly-submitted post to.
:param flair_id: The flair template to select (default: None).
:param flair_text: If the template's ``flair_text_editable`` value is
True, this value will set a custom text (default: None).
:param resubmit: When False, an error will occur if the URL has already
been submitted (default: True).
:param send_replies: When True, messages will be sent to the submission
author when comments are made to the submission (default: True).
:param nsfw: Whether or not the submission should be marked NSFW
(default: False).
:param spoiler: Whether or not the submission should be marked as
a spoiler (default: False).
:param timeout: Specifies a particular timeout, in seconds. Use to
avoid "Websocket error" exceptions (default: 10).
.. note::
Reddit's API uses WebSockets to respond with the link of the
newly created post. If this fails, the method will raise
:class:`.ClientException`. Occasionally, the Reddit post will still
be created. More often, there is an error with the image file. If
you frequently get exceptions but successfully created posts, try
setting the ``timeout`` parameter to a value above 10.
:returns: A :class:`~.Submission` object for the newly created
submission.
For example to submit an image to ``/r/reddit_api_test`` do:
.. code:: python
title = 'My favorite picture'
image = '/path/to/image.png'
reddit.subreddit('reddit_api_test').submit_image(title, image)
"""
data = {
"sr": str(self),
"resubmit": bool(resubmit),
"sendreplies": bool(send_replies),
"title": title,
"nsfw": bool(nsfw),
"spoiler": bool(spoiler),
}
for key, value in (
("flair_id", flair_id),
("flair_text", flair_text),
("collection_id", collection_id),
):
if value is not None:
data[key] = value
data.update(kind="image", url=self._upload_media(image_path))
return self._submit_media(data, timeout)
def submit_video(
self,
title,
video_path,
videogif=False,
thumbnail_path=None,
flair_id=None,
flair_text=None,
resubmit=True,
send_replies=True,
nsfw=False,
spoiler=False,
timeout=10,
collection_id=None,
):
"""Add a video or videogif submission to the subreddit.
:param title: The title of the submission.
:param video_path: The path to a video, to upload and post.
:param videogif: A ``bool`` value. If ``True``, the video is
uploaded as a videogif, which is essentially a silent video
(default: ``False``).
:param thumbnail_path: (Optional) The path to an image, to be uploaded
and used as the thumbnail for this video. If not provided, the
PRAW logo will be used as the thumbnail.
:param collection_id: The UUID of a :class:`.Collection` to add the
newly-submitted post to.
:param flair_id: The flair template to select (default: ``None``).
:param flair_text: If the template's ``flair_text_editable`` value is
True, this value will set a custom text (default: ``None``).
:param resubmit: When False, an error will occur if the URL has already
been submitted (default: ``True``).
:param send_replies: When True, messages will be sent to the submission
author when comments are made to the submission
(default: ``True``).
:param nsfw: Whether or not the submission should be marked NSFW
(default: False).
:param spoiler: Whether or not the submission should be marked as
a spoiler (default: False).
:param timeout: Specifies a particular timeout, in seconds. Use to
avoid "Websocket error" exceptions (default: 10).
.. note::
Reddit's API uses WebSockets to respond with the link of the
newly created post. If this fails, the method will raise
:class:`.ClientException`. Occasionally, the Reddit post will still
be created. More often, there is an error with the video file. If
you frequently get exceptions but successfully created posts, try
setting the ``timeout`` parameter to a value above 10.
:returns: A :class:`~.Submission` object for the newly created
submission.
For example to submit a video to ``/r/reddit_api_test`` do:
.. code:: python
title = 'My favorite movie'
video = '/path/to/video.mp4'
reddit.subreddit('reddit_api_test').submit_video(title, video)
"""
data = {
"sr": str(self),
"resubmit": bool(resubmit),
"sendreplies": bool(send_replies),
"title": title,
"nsfw": bool(nsfw),
"spoiler": bool(spoiler),
}
for key, value in (
("flair_id", flair_id),
("flair_text", flair_text),
("collection_id", collection_id),
):
if value is not None:
data[key] = value
data.update(
kind="videogif" if videogif else "video",
url=self._upload_media(video_path),
# if thumbnail_path is None, it uploads the PRAW logo
video_poster_url=self._upload_media(thumbnail_path),
)
return self._submit_media(data, timeout)
def subscribe(self, other_subreddits=None):
"""Subscribe to the subreddit.
:param other_subreddits: When provided, also subscribe to the provided
list of subreddits.
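        For example, one possible way to subscribe to ``/r/test`` and
        ``/r/redditdev`` in a single call is:
        .. code:: python
            reddit.subreddit('test').subscribe(other_subreddits=['redditdev'])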
"""
data = {
"action": "sub",
"skip_inital_defaults": True,
"sr_name": self._subreddit_list(self, other_subreddits),
}
self._reddit.post(API_PATH["subscribe"], data=data)
def traffic(self):
"""Return a dictionary of the subreddit's traffic statistics.
Raises ``prawcore.NotFound`` when the traffic stats aren't available to
the authenticated user, that is, they are not public and the
authenticated user is not a moderator of the subreddit.
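        As a small usage sketch, the returned statistics can simply be
        printed:
        .. code:: python
            print(reddit.subreddit('NAME').traffic())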
"""
return self._reddit.get(
API_PATH["about_traffic"].format(subreddit=self)
)
def unsubscribe(self, other_subreddits=None):
"""Unsubscribe from the subreddit.
        :param other_subreddits: When provided, also unsubscribe from the
            provided list of subreddits.
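        For example, to unsubscribe from ``/r/test`` try:
        .. code:: python
            reddit.subreddit('test').unsubscribe()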
"""
data = {
"action": "unsub",
"sr_name": self._subreddit_list(self, other_subreddits),
}
self._reddit.post(API_PATH["subscribe"], data=data)
class SubredditFilters(object):
"""Provide functions to interact with the special Subreddit's filters.
Members of this class should be utilized via ``Subreddit.filters``. For
example to add a filter run:
.. code:: python
reddit.subreddit('all').filters.add('subreddit_name')
"""
def __init__(self, subreddit):
"""Create a SubredditFilters instance.
:param subreddit: The special subreddit whose filters to work with.
As of this writing filters can only be used with the special subreddits
``all`` and ``mod``.
"""
self.subreddit = subreddit
def __iter__(self):
"""Iterate through the special subreddit's filters.
This method should be invoked as:
.. code:: python
for subreddit in reddit.subreddit('NAME').filters:
...
"""
url = API_PATH["subreddit_filter_list"].format(
special=self.subreddit, user=self.subreddit._reddit.user.me()
)
params = {"unique": self.subreddit._reddit._next_unique}
response_data = self.subreddit._reddit.get(url, params=params)
for subreddit in response_data.subreddits:
yield subreddit
def add(self, subreddit):
"""Add ``subreddit`` to the list of filtered subreddits.
:param subreddit: The subreddit to add to the filter list.
Items from subreddits added to the filtered list will no longer be
included when obtaining listings for ``/r/all``.
        Alternatively, you can temporarily filter a subreddit from a special
        listing like so:
.. code:: python
reddit.subreddit('all-redditdev-learnpython')
Raises ``prawcore.NotFound`` when calling on a non-special subreddit.
"""
url = API_PATH["subreddit_filter"].format(
special=self.subreddit,
user=self.subreddit._reddit.user.me(),
subreddit=subreddit,
)
self.subreddit._reddit.request(
"PUT", url, data={"model": dumps({"name": str(subreddit)})}
)
def remove(self, subreddit):
"""Remove ``subreddit`` from the list of filtered subreddits.
:param subreddit: The subreddit to remove from the filter list.
Raises ``prawcore.NotFound`` when calling on a non-special subreddit.
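        For example, a previously added filter might be removed with:
        .. code:: python
            reddit.subreddit('all').filters.remove('subreddit_name')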
"""
url = API_PATH["subreddit_filter"].format(
special=self.subreddit,
user=self.subreddit._reddit.user.me(),
subreddit=str(subreddit),
)
self.subreddit._reddit.request("DELETE", url, data={})
class SubredditFlair(object):
"""Provide a set of functions to interact with a Subreddit's flair."""
@cachedproperty
def link_templates(self):
"""Provide an instance of :class:`.SubredditLinkFlairTemplates`.
Use this attribute for interacting with a subreddit's link flair
templates. For example to list all the link flair templates for a
subreddit which you have the ``flair`` moderator permission on try:
.. code-block:: python
for template in reddit.subreddit('NAME').flair.link_templates:
print(template)
"""
return SubredditLinkFlairTemplates(self.subreddit)
@cachedproperty
def templates(self):
"""Provide an instance of :class:`.SubredditRedditorFlairTemplates`.
Use this attribute for interacting with a subreddit's flair
templates. For example to list all the flair templates for a subreddit
which you have the ``flair`` moderator permission on try:
.. code-block:: python
for template in reddit.subreddit('NAME').flair.templates:
print(template)
"""
return SubredditRedditorFlairTemplates(self.subreddit)
def __call__(self, redditor=None, **generator_kwargs):
"""Return a generator for Redditors and their associated flair.
:param redditor: When provided, yield at most a single
:class:`~.Redditor` instance (default: None).
This method is intended to be used like:
.. code-block:: python
for flair in reddit.subreddit('NAME').flair(limit=None):
print(flair)
"""
Subreddit._safely_add_arguments(
generator_kwargs, "params", name=redditor
)
generator_kwargs.setdefault("limit", None)
url = API_PATH["flairlist"].format(subreddit=self.subreddit)
return ListingGenerator(
self.subreddit._reddit, url, **generator_kwargs
)
def __init__(self, subreddit):
"""Create a SubredditFlair instance.
:param subreddit: The subreddit whose flair to work with.
"""
self.subreddit = subreddit
def configure(
self,
position="right",
self_assign=False,
link_position="left",
link_self_assign=False,
**settings
):
"""Update the subreddit's flair configuration.
:param position: One of left, right, or False to disable (default:
right).
:param self_assign: (boolean) Permit self assignment of user flair
(default: False).
:param link_position: One of left, right, or False to disable
(default: left).
:param link_self_assign: (boolean) Permit self assignment
of link flair (default: False).
Additional keyword arguments can be provided to handle new settings as
Reddit introduces them.
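        As a minimal sketch, enabling self-assignable user flair positioned on
        the left might look like:
        .. code:: python
            reddit.subreddit('NAME').flair.configure(
                position='left', self_assign=True)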
"""
data = {
"flair_enabled": bool(position),
"flair_position": position or "right",
"flair_self_assign_enabled": self_assign,
"link_flair_position": link_position or "",
"link_flair_self_assign_enabled": link_self_assign,
}
data.update(settings)
url = API_PATH["flairconfig"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data=data)
def delete(self, redditor):
"""Delete flair for a Redditor.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
.. note:: To delete the flair of many Redditors at once, please see
:meth:`~praw.models.reddit.subreddit.SubredditFlair.update`.
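        For example, to delete the flair of a single Redditor try:
        .. code:: python
            reddit.subreddit('NAME').flair.delete('spez')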
"""
url = API_PATH["deleteflair"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data={"name": str(redditor)})
def delete_all(self):
"""Delete all Redditor flair in the Subreddit.
:returns: List of dictionaries indicating the success or failure of
each delete.
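        As a usage sketch:
        .. code:: python
            results = reddit.subreddit('NAME').flair.delete_all()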
"""
return self.update(x["user"] for x in self())
def set(
self, redditor=None, text="", css_class="", flair_template_id=None
):
"""Set flair for a Redditor.
:param redditor: (Required) A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
:param text: The flair text to associate with the Redditor or
Submission (default: '').
:param css_class: The css class to associate with the flair html
(default: ''). Use either this or ``flair_template_id``.
:param flair_template_id: The ID of the flair template to be used
(default: ``None``). Use either this or ``css_class``.
This method can only be used by an authenticated user who is a
moderator of the associated Subreddit.
Example:
.. code:: python
reddit.subreddit('redditdev').flair.set('bboe', 'PRAW author',
css_class='mods')
template = '6bd28436-1aa7-11e9-9902-0e05ab0fad46'
reddit.subreddit('redditdev').flair.set('spez', 'Reddit CEO',
flair_template_id=template)
"""
if css_class and flair_template_id is not None:
raise TypeError(
"Parameter `css_class` cannot be used in "
"conjunction with `flair_template_id`."
)
data = {"name": str(redditor), "text": text}
if flair_template_id is not None:
data["flair_template_id"] = flair_template_id
url = API_PATH["select_flair"].format(subreddit=self.subreddit)
else:
data["css_class"] = css_class
url = API_PATH["flair"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data=data)
def update(self, flair_list, text="", css_class=""):
"""Set or clear the flair for many Redditors at once.
:param flair_list: Each item in this list should be either: the name of
a Redditor, an instance of :class:`.Redditor`, or a dictionary
mapping keys ``user``, ``flair_text``, and ``flair_css_class`` to
their respective values. The ``user`` key should map to a Redditor,
as described above. When a dictionary isn't provided, or the
            dictionary is missing one of the ``flair_text`` or
            ``flair_css_class`` attributes, the default values will come from
            the following arguments.
:param text: The flair text to use when not explicitly provided in
``flair_list`` (default: '').
:param css_class: The css class to use when not explicitly provided in
``flair_list`` (default: '').
:returns: List of dictionaries indicating the success or failure of
each update.
For example to clear the flair text, and set the ``praw`` flair css
class on a few users try:
.. code:: python
subreddit.flair.update(['bboe', 'spez', 'spladug'],
css_class='praw')
"""
lines = []
for item in flair_list:
if isinstance(item, dict):
fmt_data = (
str(item["user"]),
item.get("flair_text", text),
item.get("flair_css_class", css_class),
)
else:
fmt_data = (str(item), text, css_class)
lines.append('"{}","{}","{}"'.format(*fmt_data))
response = []
url = API_PATH["flaircsv"].format(subreddit=self.subreddit)
while lines:
data = {"flair_csv": "\n".join(lines[:100])}
response.extend(self.subreddit._reddit.post(url, data=data))
lines = lines[100:]
return response
class SubredditFlairTemplates(object):
"""Provide functions to interact with a Subreddit's flair templates."""
@staticmethod
def flair_type(is_link):
"""Return LINK_FLAIR or USER_FLAIR depending on ``is_link`` value."""
return "LINK_FLAIR" if is_link else "USER_FLAIR"
def __init__(self, subreddit):
"""Create a SubredditFlairTemplate instance.
:param subreddit: The subreddit whose flair templates to work with.
.. note:: This class should not be initialized directly. Instead obtain
an instance via:
``reddit.subreddit('subreddit_name').flair.templates`` or
``reddit.subreddit('subreddit_name').flair.link_templates``.
"""
self.subreddit = subreddit
def _add(
self,
text,
css_class="",
text_editable=False,
is_link=None,
background_color=None,
text_color=None,
mod_only=None,
):
if css_class and any(
param is not None
for param in (background_color, text_color, mod_only)
):
raise TypeError(
"Parameter `css_class` cannot be used in "
"conjunction with parameters `background_color`, "
"`text_color`, or `mod_only`."
)
if css_class:
url = API_PATH["flairtemplate"].format(subreddit=self.subreddit)
data = {
"css_class": css_class,
"flair_type": self.flair_type(is_link),
"text": text,
"text_editable": bool(text_editable),
}
else:
url = API_PATH["flairtemplate_v2"].format(subreddit=self.subreddit)
data = {
"background_color": background_color,
"text_color": text_color,
"flair_type": self.flair_type(is_link),
"text": text,
"text_editable": bool(text_editable),
"mod_only": bool(mod_only),
}
self.subreddit._reddit.post(url, data=data)
def _clear(self, is_link=None):
url = API_PATH["flairtemplateclear"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(
url, data={"flair_type": self.flair_type(is_link)}
)
def delete(self, template_id):
"""Remove a flair template provided by ``template_id``.
For example, to delete the first Redditor flair template listed, try:
.. code-block:: python
template_info = list(subreddit.flair.templates)[0]
subreddit.flair.templates.delete(template_info['id'])
"""
url = API_PATH["flairtemplatedelete"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(
url, data={"flair_template_id": template_id}
)
def update(
self,
template_id,
text,
css_class="",
text_editable=False,
background_color=None,
text_color=None,
mod_only=None,
):
"""Update the flair template provided by ``template_id``.
:param template_id: The flair template to update.
:param text: The flair template's new text (required).
:param css_class: The flair template's new css_class (default: '').
Cannot be used in conjunction with ``background_color``,
``text_color``, or ``mod_only``.
:param text_editable: (boolean) Indicate if the flair text can be
modified for each Redditor that sets it (default: False).
:param background_color: The flair template's new background color,
as a hex color. Cannot be used in conjunction with ``css_class``.
:param text_color: The flair template's new text color, either
``'light'`` or ``'dark'``. Cannot be used in conjunction with
``css_class``.
:param mod_only: (boolean) Indicate if the flair can only be used by
moderators. Cannot be used in conjunction with ``css_class``.
For example to make a user flair template text_editable, try:
.. code-block:: python
template_info = list(subreddit.flair.templates)[0]
subreddit.flair.templates.update(
template_info['id'],
template_info['flair_text'],
text_editable=True)
.. note::
Any parameters not provided will be set to default values (usually
``None`` or ``False``) on Reddit's end.
"""
if css_class and any(
param is not None
for param in (background_color, text_color, mod_only)
):
raise TypeError(
"Parameter `css_class` cannot be used in "
"conjunction with parameters `background_color`, "
"`text_color`, or `mod_only`."
)
if css_class:
url = API_PATH["flairtemplate"].format(subreddit=self.subreddit)
data = {
"css_class": css_class,
"flair_template_id": template_id,
"text": text,
"text_editable": bool(text_editable),
}
else:
url = API_PATH["flairtemplate_v2"].format(subreddit=self.subreddit)
data = {
"flair_template_id": template_id,
"text": text,
"background_color": background_color,
"text_color": text_color,
"text_editable": text_editable,
"mod_only": mod_only,
}
self.subreddit._reddit.post(url, data=data)
class SubredditRedditorFlairTemplates(SubredditFlairTemplates):
"""Provide functions to interact with Redditor flair templates."""
def __iter__(self):
"""Iterate through the user flair templates.
Example:
.. code-block:: python
for template in reddit.subreddit('NAME').flair.templates:
print(template)
"""
url = API_PATH["user_flair"].format(subreddit=self.subreddit)
params = {"unique": self.subreddit._reddit._next_unique}
for template in self.subreddit._reddit.get(url, params=params):
yield template
def add(
self,
text,
css_class="",
text_editable=False,
background_color=None,
text_color=None,
mod_only=None,
):
"""Add a Redditor flair template to the associated subreddit.
:param text: The flair template's text (required).
:param css_class: The flair template's css_class (default: '').
Cannot be used in conjunction with ``background_color``,
``text_color``, or ``mod_only``.
:param text_editable: (boolean) Indicate if the flair text can be
modified for each Redditor that sets it (default: False).
:param background_color: The flair template's new background color,
as a hex color. Cannot be used in conjunction with ``css_class``.
:param text_color: The flair template's new text color, either
``'light'`` or ``'dark'``. Cannot be used in conjunction with
``css_class``.
:param mod_only: (boolean) Indicate if the flair can only be used by
moderators. Cannot be used in conjunction with ``css_class``.
For example, to add an editable Redditor flair try:
.. code-block:: python
reddit.subreddit('NAME').flair.templates.add(
css_class='praw', text_editable=True)
"""
self._add(
text,
css_class=css_class,
text_editable=text_editable,
is_link=False,
background_color=background_color,
text_color=text_color,
mod_only=mod_only,
)
def clear(self):
"""Remove all Redditor flair templates from the subreddit.
For example:
.. code-block:: python
reddit.subreddit('NAME').flair.templates.clear()
"""
self._clear(is_link=False)
class SubredditLinkFlairTemplates(SubredditFlairTemplates):
"""Provide functions to interact with link flair templates."""
def __iter__(self):
"""Iterate through the link flair templates.
Example:
.. code-block:: python
for template in reddit.subreddit('NAME').flair.link_templates:
print(template)
"""
url = API_PATH["link_flair"].format(subreddit=self.subreddit)
for template in self.subreddit._reddit.get(url):
yield template
def add(
self,
text,
css_class="",
text_editable=False,
background_color=None,
text_color=None,
mod_only=None,
):
"""Add a link flair template to the associated subreddit.
:param text: The flair template's text (required).
:param css_class: The flair template's css_class (default: '').
Cannot be used in conjunction with ``background_color``,
``text_color``, or ``mod_only``.
:param text_editable: (boolean) Indicate if the flair text can be
modified for each Redditor that sets it (default: False).
:param background_color: The flair template's new background color,
as a hex color. Cannot be used in conjunction with ``css_class``.
:param text_color: The flair template's new text color, either
``'light'`` or ``'dark'``. Cannot be used in conjunction with
``css_class``.
:param mod_only: (boolean) Indicate if the flair can only be used by
moderators. Cannot be used in conjunction with ``css_class``.
For example, to add an editable link flair try:
.. code-block:: python
reddit.subreddit('NAME').flair.link_templates.add(
css_class='praw', text_editable=True)
"""
self._add(
text,
css_class=css_class,
text_editable=text_editable,
is_link=True,
background_color=background_color,
text_color=text_color,
mod_only=mod_only,
)
def clear(self):
"""Remove all link flair templates from the subreddit.
For example:
.. code-block:: python
reddit.subreddit('NAME').flair.link_templates.clear()
"""
self._clear(is_link=True)
class SubredditModeration(object):
"""Provides a set of moderation functions to a Subreddit."""
@staticmethod
def _handle_only(only, generator_kwargs):
if only is not None:
if only == "submissions":
only = "links"
RedditBase._safely_add_arguments(
generator_kwargs, "params", only=only
)
def __init__(self, subreddit):
"""Create a SubredditModeration instance.
:param subreddit: The subreddit to moderate.
"""
self.subreddit = subreddit
def accept_invite(self):
"""Accept an invitation as a moderator of the community."""
url = API_PATH["accept_mod_invite"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url)
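    # Illustrative usage (added comment, not part of the original source):
    # assuming ``reddit`` is an authenticated instance whose user has a
    # pending moderator invite, the invite could be accepted with
    #
    #     reddit.subreddit('NAME').mod.accept_invite()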
def edited(self, only=None, **generator_kwargs):
"""Return a ListingGenerator for edited comments and submissions.
:param only: If specified, one of ``'comments'``, or ``'submissions'``
to yield only results of that type.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
To print all items in the edited queue try:
.. code:: python
for item in reddit.subreddit('mod').mod.edited(limit=None):
print(item)
"""
self._handle_only(only, generator_kwargs)
return ListingGenerator(
self.subreddit._reddit,
API_PATH["about_edited"].format(subreddit=self.subreddit),
**generator_kwargs
)
def inbox(self, **generator_kwargs):
"""Return a ListingGenerator for moderator messages.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
See ``unread`` for unread moderator messages.
To print the last 5 moderator mail messages and their replies, try:
.. code:: python
for message in reddit.subreddit('mod').mod.inbox(limit=5):
print("From: {}, Body: {}".format(message.author, message.body))
for reply in message.replies:
print("From: {}, Body: {}".format(reply.author, reply.body))
"""
return ListingGenerator(
self.subreddit._reddit,
API_PATH["moderator_messages"].format(subreddit=self.subreddit),
**generator_kwargs
)
def log(self, action=None, mod=None, **generator_kwargs):
"""Return a ListingGenerator for moderator log entries.
:param action: If given, only return log entries for the specified
action.
:param mod: If given, only return log entries for actions made by the
passed in Redditor.
To print the moderator and subreddit of the last 5 modlog entries try:
.. code:: python
for log in reddit.subreddit('mod').mod.log(limit=5):
print("Mod: {}, Subreddit: {}".format(log.mod, log.subreddit))
"""
params = {"mod": str(mod) if mod else mod, "type": action}
Subreddit._safely_add_arguments(generator_kwargs, "params", **params)
return ListingGenerator(
self.subreddit._reddit,
API_PATH["about_log"].format(subreddit=self.subreddit),
**generator_kwargs
)
def modqueue(self, only=None, **generator_kwargs):
"""Return a ListingGenerator for comments/submissions in the modqueue.
:param only: If specified, one of ``'comments'``, or ``'submissions'``
to yield only results of that type.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
To print all modqueue items try:
.. code:: python
for item in reddit.subreddit('mod').mod.modqueue(limit=None):
print(item)
"""
self._handle_only(only, generator_kwargs)
return ListingGenerator(
self.subreddit._reddit,
API_PATH["about_modqueue"].format(subreddit=self.subreddit),
**generator_kwargs
)
def reports(self, only=None, **generator_kwargs):
"""Return a ListingGenerator for reported comments and submissions.
:param only: If specified, one of ``'comments'``, or ``'submissions'``
to yield only results of that type.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
To print the user and mod report reasons in the report queue try:
.. code:: python
for reported_item in reddit.subreddit('mod').mod.reports():
print("User Reports: {}".format(reported_item.user_reports))
print("Mod Reports: {}".format(reported_item.mod_reports))
"""
self._handle_only(only, generator_kwargs)
return ListingGenerator(
self.subreddit._reddit,
API_PATH["about_reports"].format(subreddit=self.subreddit),
**generator_kwargs
)
def settings(self):
"""Return a dictionary of the subreddit's current settings."""
url = API_PATH["subreddit_settings"].format(subreddit=self.subreddit)
return self.subreddit._reddit.get(url)["data"]
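    # Illustrative usage (added comment, not part of the original source):
    #
    #     settings = reddit.subreddit('NAME').mod.settings()
    #     print(settings.get('subreddit_type'))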
def spam(self, only=None, **generator_kwargs):
"""Return a ListingGenerator for spam comments and submissions.
:param only: If specified, one of ``'comments'``, or ``'submissions'``
to yield only results of that type.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
To print the items in the spam queue try:
.. code:: python
for item in reddit.subreddit('mod').mod.spam():
print(item)
"""
self._handle_only(only, generator_kwargs)
return ListingGenerator(
self.subreddit._reddit,
API_PATH["about_spam"].format(subreddit=self.subreddit),
**generator_kwargs
)
def unmoderated(self, **generator_kwargs):
"""Return a ListingGenerator for unmoderated submissions.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
To print the items in the unmoderated queue try:
.. code:: python
for item in reddit.subreddit('mod').mod.unmoderated():
print(item)
"""
return ListingGenerator(
self.subreddit._reddit,
API_PATH["about_unmoderated"].format(subreddit=self.subreddit),
**generator_kwargs
)
def unread(self, **generator_kwargs):
"""Return a ListingGenerator for unread moderator messages.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
See ``inbox`` for all messages.
To print the mail in the unread modmail queue try:
.. code:: python
for message in reddit.subreddit('mod').mod.unread():
print("From: {}, To: {}".format(message.author, message.dest))
"""
return ListingGenerator(
self.subreddit._reddit,
API_PATH["moderator_unread"].format(subreddit=self.subreddit),
**generator_kwargs
)
def update(self, **settings):
"""Update the subreddit's settings.
:param allow_images: Allow users to upload images using the native
image hosting. Only applies to link-only subreddits.
:param allow_post_crossposts: Allow users to crosspost submissions from
other subreddits.
:param allow_top: Allow the subreddit to appear on ``/r/all`` as well
as the default and trending lists.
:param collapse_deleted_comments: Collapse deleted and removed comments
on comments pages by default.
:param comment_score_hide_mins: The number of minutes to hide comment
scores.
:param description: Shown in the sidebar of your subreddit.
:param disable_contributor_requests: Specifies whether redditors may
send automated modmail messages requesting approval as a submitter.
:type disable_contributor_requests: bool
:param domain: Domain name with a cname that points to
{subreddit}.reddit.com.
:param exclude_banned_modqueue: Exclude posts by site-wide banned users
from modqueue/unmoderated.
:param header_hover_text: The text seen when hovering over the snoo.
:param hide_ads: Don't show ads within this subreddit. Only applies to
gold-user only subreddits.
:param key_color: A 6-digit rgb hex color (e.g. ``'#AABBCC'``), used as
a thematic color for your subreddit on mobile.
:param lang: A valid IETF language tag (underscore separated).
:param link_type: The types of submissions users can make.
One of ``any``, ``link``, ``self``.
:param over_18: Viewers must be over 18 years old (i.e. NSFW).
:param public_description: Public description blurb. Appears in search
results and on the landing page for private subreddits.
:param public_traffic: Make the traffic stats page public.
:param restrict_commenting: Specifies whether approved users have the
ability to comment.
:type restrict_commenting: bool
:param restrict_posting: Specifies whether approved users have the
ability to submit posts.
:type restrict_posting: bool
:param show_media: Show thumbnails on submissions.
:param show_media_preview: Expand media previews on comments pages.
:param spam_comments: Spam filter strength for comments.
One of ``all``, ``low``, ``high``.
:param spam_links: Spam filter strength for links.
One of ``all``, ``low``, ``high``.
:param spam_selfposts: Spam filter strength for selfposts.
One of ``all``, ``low``, ``high``.
:param spoilers_enabled: Enable marking posts as containing spoilers.
:param sr: The fullname of the subreddit whose settings will be
updated.
:param submit_link_label: Custom label for submit link button
(None for default).
:param submit_text: Text to show on submission page.
:param submit_text_label: Custom label for submit text post button
(None for default).
:param subreddit_type: One of ``archived``, ``employees_only``,
``gold_only``, ``gold_restricted``, ``private``, ``public``,
``restricted``.
:param suggested_comment_sort: All comment threads will use this
sorting method by default. Leave None, or choose one of
``confidence``, ``controversial``, ``new``, ``old``, ``qa``,
``random``, ``top``.
:param title: The title of the subreddit.
:param wiki_edit_age: Account age, in days, required to edit and create
wiki pages.
:param wiki_edit_karma: Subreddit karma required to edit and create
wiki pages.
:param wikimode: One of ``anyone``, ``disabled``, ``modonly``.
Additional keyword arguments can be provided to handle new settings as
Reddit introduces them.
Settings that are documented here and aren't explicitly set by you in a
call to :meth:`.SubredditModeration.update` should retain their current
        value. If they do not, please file a bug.
.. warning:: Undocumented settings, or settings that were very recently
documented, may not retain their current value when
updating. This often occurs when Reddit adds a new setting
but forgets to add that setting to the API endpoint that
is used to fetch the current settings.
"""
current_settings = self.settings()
fullname = current_settings.pop("subreddit_id")
# These attributes come out using different names than they go in.
remap = {
"allow_top": "default_set",
"lang": "language",
"link_type": "content_options",
}
for (new, old) in remap.items():
current_settings[new] = current_settings.pop(old)
current_settings.update(settings)
return Subreddit._create_or_update(
_reddit=self.subreddit._reddit, sr=fullname, **current_settings
)
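    # Hypothetical usage sketch (added comment, not part of the original
    # source): change a couple of documented settings while the remaining
    # values are preserved by the merge above, e.g.
    #
    #     reddit.subreddit('NAME').mod.update(over_18=True, spam_links='high')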
class SubredditQuarantine(object):
"""Provides subreddit quarantine related methods."""
def __init__(self, subreddit):
"""Create a SubredditQuarantine instance.
:param subreddit: The subreddit associated with the quarantine.
"""
self.subreddit = subreddit
def opt_in(self):
"""Permit your user access to the quarantined subreddit.
Usage:
.. code:: python
subreddit = reddit.subreddit('QUESTIONABLE')
next(subreddit.hot()) # Raises prawcore.Forbidden
subreddit.quaran.opt_in()
next(subreddit.hot()) # Returns Submission
"""
data = {"sr_name": self.subreddit}
try:
self.subreddit._reddit.post(
API_PATH["quarantine_opt_in"], data=data
)
except Redirect:
pass
def opt_out(self):
"""Remove access to the quarantined subreddit.
Usage:
.. code:: python
subreddit = reddit.subreddit('QUESTIONABLE')
next(subreddit.hot()) # Returns Submission
subreddit.quaran.opt_out()
next(subreddit.hot()) # Raises prawcore.Forbidden
"""
data = {"sr_name": self.subreddit}
try:
self.subreddit._reddit.post(
API_PATH["quarantine_opt_out"], data=data
)
except Redirect:
pass
class SubredditRelationship(object):
"""Represents a relationship between a redditor and subreddit.
Instances of this class can be iterated through in order to discover the
Redditors that make up the relationship.
For example, banned users of a subreddit can be iterated through like so:
.. code-block:: python
for ban in reddit.subreddit('redditdev').banned():
print('{}: {}'.format(ban, ban.note))
"""
def __call__(self, redditor=None, **generator_kwargs):
"""Return a generator for Redditors belonging to this relationship.
:param redditor: When provided, yield at most a single
:class:`~.Redditor` instance. This is useful to confirm if a
relationship exists, or to fetch the metadata associated with a
particular relationship (default: None).
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
"""
Subreddit._safely_add_arguments(
generator_kwargs, "params", user=redditor
)
url = API_PATH["list_{}".format(self.relationship)].format(
subreddit=self.subreddit
)
return ListingGenerator(
self.subreddit._reddit, url, **generator_kwargs
)
def __init__(self, subreddit, relationship):
"""Create a SubredditRelationship instance.
:param subreddit: The subreddit for the relationship.
:param relationship: The name of the relationship.
"""
self.relationship = relationship
self.subreddit = subreddit
def add(self, redditor, **other_settings):
"""Add ``redditor`` to this relationship.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
"""
data = {"name": str(redditor), "type": self.relationship}
data.update(other_settings)
url = API_PATH["friend"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data=data)
def remove(self, redditor):
"""Remove ``redditor`` from this relationship.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
"""
data = {"name": str(redditor), "type": self.relationship}
url = API_PATH["unfriend"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data=data)
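    # Illustrative sketch (added comment, not part of the original source):
    # this generic relationship interface backs attributes such as
    # ``subreddit.banned``, so a ban could be added and removed with
    #
    #     reddit.subreddit('NAME').banned.add('spez')
    #     reddit.subreddit('NAME').banned.remove('spez')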
class ContributorRelationship(SubredditRelationship):
"""Provides methods to interact with a Subreddit's contributors.
Contributors are also known as approved submitters.
Contributors of a subreddit can be iterated through like so:
.. code-block:: python
for contributor in reddit.subreddit('redditdev').contributor():
print(contributor)
"""
def leave(self):
"""Abdicate the contributor position."""
self.subreddit._reddit.post(
API_PATH["leavecontributor"], data={"id": self.subreddit.fullname}
)
class ModeratorRelationship(SubredditRelationship):
"""Provides methods to interact with a Subreddit's moderators.
Moderators of a subreddit can be iterated through like so:
.. code-block:: python
for moderator in reddit.subreddit('redditdev').moderator():
print(moderator)
"""
PERMISSIONS = {"access", "config", "flair", "mail", "posts", "wiki"}
@staticmethod
def _handle_permissions(permissions, other_settings):
other_settings = deepcopy(other_settings) if other_settings else {}
other_settings["permissions"] = permissions_string(
permissions, ModeratorRelationship.PERMISSIONS
)
return other_settings
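    # Clarifying note (added comment, assumption about the helper): the
    # ``permissions_string`` call is expected to serialise the requested
    # subset into Reddit's "+perm"/"-perm" form, with ``None`` meaning all
    # permissions.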
def __call__(self, redditor=None): # pylint: disable=arguments-differ
"""Return a list of Redditors who are moderators.
:param redditor: When provided, return a list containing at most one
:class:`~.Redditor` instance. This is useful to confirm if a
relationship exists, or to fetch the metadata associated with a
particular relationship (default: None).
.. note:: Unlike other relationship callables, this relationship is not
paginated. Thus it simply returns the full list, rather than
an iterator for the results.
To be used like:
.. code:: python
moderators = reddit.subreddit('nameofsub').moderator()
For example, to list the moderators along with their permissions try:
.. code:: python
for moderator in reddit.subreddit('SUBREDDIT').moderator():
print('{}: {}'.format(moderator, moderator.mod_permissions))
"""
params = {} if redditor is None else {"user": redditor}
url = API_PATH["list_{}".format(self.relationship)].format(
subreddit=self.subreddit
)
return self.subreddit._reddit.get(url, params=params)
# pylint: disable=arguments-differ
def add(self, redditor, permissions=None, **other_settings):
"""Add or invite ``redditor`` to be a moderator of the subreddit.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
:param permissions: When provided (not ``None``), permissions should be
a list of strings specifying which subset of permissions to
            grant. An empty list ``[]`` indicates no permissions, and when not
            provided (``None``), full permissions are granted.
An invite will be sent unless the user making this call is an admin
user.
For example, to invite ``'spez'`` with ``'posts'`` and ``'mail'``
        permissions to ``/r/test``, try:
.. code:: python
reddit.subreddit('test').moderator.add('spez', ['posts', 'mail'])
"""
other_settings = self._handle_permissions(permissions, other_settings)
super(ModeratorRelationship, self).add(redditor, **other_settings)
# pylint: enable=arguments-differ
def invite(self, redditor, permissions=None, **other_settings):
"""Invite ``redditor`` to be a moderator of the subreddit.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
:param permissions: When provided (not ``None``), permissions should be
a list of strings specifying which subset of permissions to
            grant. An empty list ``[]`` indicates no permissions, and when not
            provided (``None``), full permissions are granted.
For example, to invite ``'spez'`` with ``'posts'`` and ``'mail'``
        permissions to ``/r/test``, try:
.. code:: python
reddit.subreddit('test').moderator.invite('spez', ['posts', 'mail'])
"""
data = self._handle_permissions(permissions, other_settings)
data.update({"name": str(redditor), "type": "moderator_invite"})
url = API_PATH["friend"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data=data)
def leave(self):
"""Abdicate the moderator position (use with care).
Example:
.. code:: python
reddit.subreddit('subredditname').moderator.leave()
"""
self.remove(self.subreddit._reddit.config.username)
def remove_invite(self, redditor):
"""Remove the moderator invite for ``redditor``.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
Example:
.. code:: python
reddit.subreddit('subredditname').moderator.remove_invite('spez')
"""
data = {"name": str(redditor), "type": "moderator_invite"}
url = API_PATH["unfriend"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data=data)
def update(self, redditor, permissions=None):
"""Update the moderator permissions for ``redditor``.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
:param permissions: When provided (not ``None``), permissions should be
a list of strings specifying which subset of permissions to
            grant. An empty list ``[]`` indicates no permissions, and when not
            provided (``None``), full permissions are granted.
For example, to add all permissions to the moderator, try:
.. code:: python
subreddit.moderator.update('spez')
To remove all permissions from the moderator, try:
.. code:: python
subreddit.moderator.update('spez', [])
"""
url = API_PATH["setpermissions"].format(subreddit=self.subreddit)
data = self._handle_permissions(
permissions, {"name": str(redditor), "type": "moderator"}
)
self.subreddit._reddit.post(url, data=data)
def update_invite(self, redditor, permissions=None):
"""Update the moderator invite permissions for ``redditor``.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
:param permissions: When provided (not ``None``), permissions should be
a list of strings specifying which subset of permissions to
            grant. An empty list ``[]`` indicates no permissions, and when not
            provided (``None``), full permissions are granted.
For example, to grant the flair and mail permissions to the moderator
invite, try:
.. code:: python
subreddit.moderator.update_invite('spez', ['flair', 'mail'])
"""
url = API_PATH["setpermissions"].format(subreddit=self.subreddit)
data = self._handle_permissions(
permissions, {"name": str(redditor), "type": "moderator_invite"}
)
self.subreddit._reddit.post(url, data=data)
class Modmail(object):
"""Provides modmail functions for a subreddit."""
def __call__(self, id=None, mark_read=False): # noqa: D207, D301
"""Return an individual conversation.
:param id: A reddit base36 conversation ID, e.g., ``2gmz``.
:param mark_read: If True, conversation is marked as read
(default: False).
Example:
.. code:: python
reddit.subreddit('redditdev').modmail('2gmz', mark_read=True)
To print all messages from a conversation as Markdown source:
.. code:: python
conversation = reddit.subreddit('redditdev').modmail('2gmz', \
mark_read=True)
for message in conversation.messages:
print(message.body_markdown)
``ModmailConversation.user`` is a special instance of
:class:`.Redditor` with extra attributes describing the non-moderator
user's recent posts, comments, and modmail messages within the
subreddit, as well as information on active bans and mutes. This
attribute does not exist on internal moderator discussions.
For example, to print the user's ban status:
.. code:: python
conversation = reddit.subreddit('redditdev').modmail('2gmz', \
mark_read=True)
print(conversation.user.ban_status)
To print a list of recent submissions by the user:
.. code:: python
conversation = reddit.subreddit('redditdev').modmail('2gmz', \
mark_read=True)
print(conversation.user.recent_posts)
"""
# pylint: disable=invalid-name,redefined-builtin
return ModmailConversation(
self.subreddit._reddit, id=id, mark_read=mark_read
)
def __init__(self, subreddit):
"""Construct an instance of the Modmail object."""
self.subreddit = subreddit
def _build_subreddit_list(self, other_subreddits):
"""Return a comma-separated list of subreddit display names."""
subreddits = [self.subreddit] + (other_subreddits or [])
return ",".join(str(subreddit) for subreddit in subreddits)
def bulk_read(self, other_subreddits=None, state=None):
"""Mark conversations for subreddit(s) as read.
Due to server-side restrictions, 'all' is not a valid subreddit for
this method. Instead, use :meth:`~.Modmail.subreddits` to get a list of
subreddits using the new modmail.
:param other_subreddits: A list of :class:`.Subreddit` instances for
which to mark conversations (default: None).
:param state: Can be one of: all, archived, highlighted, inprogress,
mod, new, notifications, (default: all). "all" does not include
internal or archived conversations.
:returns: A list of :class:`.ModmailConversation` instances that were
marked read.
For example, to mark all notifications for a subreddit as read:
.. code:: python
subreddit = reddit.subreddit('redditdev')
subreddit.modmail.bulk_read(state='notifications')
"""
params = {"entity": self._build_subreddit_list(other_subreddits)}
if state:
params["state"] = state
response = self.subreddit._reddit.post(
API_PATH["modmail_bulk_read"], params=params
)
return [
self(conversation_id)
for conversation_id in response["conversation_ids"]
]
def conversations(
self,
after=None,
limit=None,
other_subreddits=None,
sort=None,
state=None,
): # noqa: D207, D301
"""Generate :class:`.ModmailConversation` objects for subreddit(s).
:param after: A base36 modmail conversation id. When provided, the
listing begins after this conversation (default: None).
:param limit: The maximum number of conversations to fetch. If None,
the server-side default is 25 at the time of writing
(default: None).
:param other_subreddits: A list of :class:`.Subreddit` instances for
which to fetch conversations (default: None).
:param sort: Can be one of: mod, recent, unread, user
(default: recent).
:param state: Can be one of: all, archived, highlighted, inprogress,
mod, new, notifications, (default: all). "all" does not include
internal or archived conversations.
Example:
.. code:: python
conversations = reddit.subreddit('all').modmail.conversations(\
state='mod')
"""
params = {}
if self.subreddit != "all":
params["entity"] = self._build_subreddit_list(other_subreddits)
for name, value in {
"after": after,
"limit": limit,
"sort": sort,
"state": state,
}.items():
if value:
params[name] = value
response = self.subreddit._reddit.get(
API_PATH["modmail_conversations"], params=params
)
for conversation_id in response["conversationIds"]:
data = {
"conversation": response["conversations"][conversation_id],
"messages": response["messages"],
}
yield ModmailConversation.parse(
data, self.subreddit._reddit, convert_objects=False
)
def create(self, subject, body, recipient, author_hidden=False):
"""Create a new modmail conversation.
:param subject: The message subject. Cannot be empty.
:param body: The message body. Cannot be empty.
:param recipient: The recipient; a username or an instance of
:class:`.Redditor`.
:param author_hidden: When True, author is hidden from non-moderators
(default: False).
:returns: A :class:`.ModmailConversation` object for the newly created
conversation.
.. code:: python
subreddit = reddit.subreddit('redditdev')
redditor = reddit.redditor('bboe')
subreddit.modmail.create('Subject', 'Body', redditor)
"""
data = {
"body": body,
"isAuthorHidden": author_hidden,
"srName": self.subreddit,
"subject": subject,
"to": recipient,
}
return self.subreddit._reddit.post(
API_PATH["modmail_conversations"], data=data
)
def subreddits(self):
"""Yield subreddits using the new modmail that the user moderates.
Example:
.. code:: python
subreddits = reddit.subreddit('all').modmail.subreddits()
"""
response = self.subreddit._reddit.get(API_PATH["modmail_subreddits"])
for value in response["subreddits"].values():
subreddit = self.subreddit._reddit.subreddit(value["display_name"])
subreddit.last_updated = value["lastUpdated"]
yield subreddit
def unread_count(self):
"""Return unread conversation count by conversation state.
At time of writing, possible states are: archived, highlighted,
inprogress, mod, new, notifications.
:returns: A dict mapping conversation states to unread counts.
For example, to print the count of unread moderator discussions:
.. code:: python
subreddit = reddit.subreddit('redditdev')
unread_counts = subreddit.modmail.unread_count()
print(unread_counts['mod'])
"""
return self.subreddit._reddit.get(API_PATH["modmail_unread_count"])
class SubredditStream(object):
"""Provides submission and comment streams."""
def __init__(self, subreddit):
"""Create a SubredditStream instance.
:param subreddit: The subreddit associated with the streams.
"""
self.subreddit = subreddit
def comments(self, **stream_options):
"""Yield new comments as they become available.
Comments are yielded oldest first. Up to 100 historical comments will
initially be returned.
Keyword arguments are passed to :func:`.stream_generator`.
For example, to retrieve all new comments made to the ``iama``
subreddit, try:
.. code:: python
for comment in reddit.subreddit('iama').stream.comments():
print(comment)
        To only retrieve new comments starting when the stream is
        created, pass ``skip_existing=True``:
.. code:: python
subreddit = reddit.subreddit('iama')
for comment in subreddit.stream.comments(skip_existing=True):
print(comment)
"""
return stream_generator(self.subreddit.comments, **stream_options)
def submissions(self, **stream_options):
"""Yield new submissions as they become available.
Submissions are yielded oldest first. Up to 100 historical submissions
will initially be returned.
Keyword arguments are passed to :func:`.stream_generator`.
        For example, to retrieve all new submissions made to all of Reddit, try:
.. code:: python
for submission in reddit.subreddit('all').stream.submissions():
print(submission)
"""
return stream_generator(self.subreddit.new, **stream_options)
class SubredditStylesheet(object):
"""Provides a set of stylesheet functions to a Subreddit."""
def __call__(self):
"""Return the subreddit's stylesheet.
To be used as:
.. code:: python
stylesheet = reddit.subreddit('SUBREDDIT').stylesheet()
"""
url = API_PATH["about_stylesheet"].format(subreddit=self.subreddit)
return self.subreddit._reddit.get(url)
def __init__(self, subreddit):
"""Create a SubredditStylesheet instance.
:param subreddit: The subreddit associated with the stylesheet.
An instance of this class is provided as:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet
"""
self.subreddit = subreddit
def _update_structured_styles(self, style_data):
url = API_PATH["structured_styles"].format(subreddit=self.subreddit)
self.subreddit._reddit.patch(url, style_data)
def _upload_image(self, image_path, data):
with open(image_path, "rb") as image:
header = image.read(len(JPEG_HEADER))
image.seek(0)
data["img_type"] = "jpg" if header == JPEG_HEADER else "png"
url = API_PATH["upload_image"].format(subreddit=self.subreddit)
response = self.subreddit._reddit.post(
url, data=data, files={"file": image}
)
if response["errors"]:
error_type = response["errors"][0]
error_value = response.get("errors_values", [""])[0]
assert error_type in [
"BAD_CSS_NAME",
"IMAGE_ERROR",
], "Please file a bug with PRAW"
raise APIException(error_type, error_value, None)
return response
def _upload_style_asset(self, image_path, image_type):
data = {"imagetype": image_type, "filepath": basename(image_path)}
data["mimetype"] = "image/jpeg"
if image_path.lower().endswith(".png"):
data["mimetype"] = "image/png"
url = API_PATH["style_asset_lease"].format(subreddit=self.subreddit)
upload_lease = self.subreddit._reddit.post(url, data=data)[
"s3UploadLease"
]
upload_data = {
item["name"]: item["value"] for item in upload_lease["fields"]
}
upload_url = "https:{}".format(upload_lease["action"])
with open(image_path, "rb") as image:
response = self.subreddit._reddit._core._requestor._http.post(
upload_url, data=upload_data, files={"file": image}
)
response.raise_for_status()
return "{}/{}".format(upload_url, upload_data["key"])
def delete_banner(self):
"""Remove the current subreddit (redesign) banner image.
Succeeds even if there is no banner image.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.delete_banner()
"""
data = {"bannerBackgroundImage": ""}
self._update_structured_styles(data)
def delete_banner_additional_image(self):
"""Remove the current subreddit (redesign) banner additional image.
Succeeds even if there is no additional image. Will also delete any
configured hover image.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.delete_banner_additional_image()
"""
data = {
"bannerPositionedImage": "",
"secondaryBannerPositionedImage": "",
}
self._update_structured_styles(data)
def delete_banner_hover_image(self):
"""Remove the current subreddit (redesign) banner hover image.
Succeeds even if there is no hover image.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.delete_banner_hover_image()
"""
data = {"secondaryBannerPositionedImage": ""}
self._update_structured_styles(data)
def delete_header(self):
"""Remove the current subreddit header image.
Succeeds even if there is no header image.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.delete_header()
"""
url = API_PATH["delete_sr_header"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url)
def delete_image(self, name):
"""Remove the named image from the subreddit.
Succeeds even if the named image does not exist.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.delete_image('smile')
"""
url = API_PATH["delete_sr_image"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data={"img_name": name})
def delete_mobile_header(self):
"""Remove the current subreddit mobile header.
Succeeds even if there is no mobile header.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.delete_mobile_header()
"""
url = API_PATH["delete_sr_header"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url)
def delete_mobile_icon(self):
"""Remove the current subreddit mobile icon.
Succeeds even if there is no mobile icon.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.delete_mobile_icon()
"""
url = API_PATH["delete_sr_icon"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url)
def update(self, stylesheet, reason=None):
"""Update the subreddit's stylesheet.
:param stylesheet: The CSS for the new stylesheet.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.update(
'p { color: green; }', 'color text green')
"""
data = {
"op": "save",
"reason": reason,
"stylesheet_contents": stylesheet,
}
url = API_PATH["subreddit_stylesheet"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data=data)
def upload(self, name, image_path):
"""Upload an image to the Subreddit.
:param name: The name to use for the image. If an image already exists
with the same name, it will be replaced.
:param image_path: A path to a jpeg or png image.
:returns: A dictionary containing a link to the uploaded image under
the key ``img_src``.
Raises ``prawcore.TooLarge`` if the overall request body is too large.
Raises :class:`.APIException` if there are other issues with the
        uploaded image. Unfortunately the exception info might not be very
        specific, so try uploading the same image through the website to see
        what the problem might be.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.upload('smile', 'img.png')
"""
return self._upload_image(
image_path, {"name": name, "upload_type": "img"}
)
def upload_banner(self, image_path):
"""Upload an image for the subreddit's (redesign) banner image.
:param image_path: A path to a jpeg or png image.
Raises ``prawcore.TooLarge`` if the overall request body is too large.
Raises :class:`.APIException` if there are other issues with the
        uploaded image. Unfortunately the exception info might not be very
        specific, so try uploading the same image through the website to see
        what the problem might be.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.upload_banner('banner.png')
"""
image_type = "bannerBackgroundImage"
image_url = self._upload_style_asset(image_path, image_type)
self._update_structured_styles({image_type: image_url})
def upload_banner_additional_image(self, image_path, align=None):
"""Upload an image for the subreddit's (redesign) additional image.
:param image_path: A path to a jpeg or png image.
:param align: Either ``left``, ``centered``, or ``right``. (default:
``left``).
Raises ``prawcore.TooLarge`` if the overall request body is too large.
Raises :class:`.APIException` if there are other issues with the
        uploaded image. Unfortunately the exception info might not be very
        specific, so try uploading the same image through the website to see
        what the problem might be.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.upload_banner_additional_image('banner.png')
"""
alignment = {}
if align is not None:
if align not in {"left", "centered", "right"}:
raise ValueError(
"align argument must be either "
"`left`, `centered`, or `right`"
)
alignment["bannerPositionedImagePosition"] = align
image_type = "bannerPositionedImage"
image_url = self._upload_style_asset(image_path, image_type)
style_data = {image_type: image_url}
if alignment:
style_data.update(alignment)
self._update_structured_styles(style_data)
def upload_banner_hover_image(self, image_path):
"""Upload an image for the subreddit's (redesign) additional image.
:param image_path: A path to a jpeg or png image.
        Fails if the Subreddit does not have an additional image defined.
Raises ``prawcore.TooLarge`` if the overall request body is too large.
Raises :class:`.APIException` if there are other issues with the
        uploaded image. Unfortunately the exception info might not be very
        specific, so try uploading the same image through the website to see
        what the problem might be.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.upload_banner_hover_image('banner.png')
"""
image_type = "secondaryBannerPositionedImage"
image_url = self._upload_style_asset(image_path, image_type)
self._update_structured_styles({image_type: image_url})
def upload_header(self, image_path):
"""Upload an image to be used as the Subreddit's header image.
:param image_path: A path to a jpeg or png image.
:returns: A dictionary containing a link to the uploaded image under
the key ``img_src``.
Raises ``prawcore.TooLarge`` if the overall request body is too large.
Raises :class:`.APIException` if there are other issues with the
        uploaded image. Unfortunately the exception info might not be very
        specific, so try uploading the same image through the website to see
        what the problem might be.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.upload_header('header.png')
"""
return self._upload_image(image_path, {"upload_type": "header"})
def upload_mobile_header(self, image_path):
"""Upload an image to be used as the Subreddit's mobile header.
:param image_path: A path to a jpeg or png image.
:returns: A dictionary containing a link to the uploaded image under
the key ``img_src``.
Raises ``prawcore.TooLarge`` if the overall request body is too large.
Raises :class:`.APIException` if there are other issues with the
        uploaded image. Unfortunately the exception info might not be very
        specific, so try uploading the same image through the website to see
        what the problem might be.
For example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.upload_mobile_header(
'header.png')
"""
return self._upload_image(image_path, {"upload_type": "banner"})
def upload_mobile_icon(self, image_path):
"""Upload an image to be used as the Subreddit's mobile icon.
:param image_path: A path to a jpeg or png image.
:returns: A dictionary containing a link to the uploaded image under
the key ``img_src``.
Raises ``prawcore.TooLarge`` if the overall request body is too large.
Raises :class:`.APIException` if there are other issues with the
        uploaded image. Unfortunately the exception info might not be very
        specific, so try uploading the same image through the website to see
        what the problem might be.
For example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.upload_mobile_icon(
'icon.png')
"""
return self._upload_image(image_path, {"upload_type": "icon"})
class SubredditWiki(object):
"""Provides a set of moderation functions to a Subreddit."""
def __getitem__(self, page_name):
"""Lazily return the WikiPage for the subreddit named ``page_name``.
This method is to be used to fetch a specific wikipage, like so:
.. code:: python
wikipage = reddit.subreddit('iama').wiki['proof']
print(wikipage.content_md)
"""
return WikiPage(
self.subreddit._reddit, self.subreddit, page_name.lower()
)
def __init__(self, subreddit):
"""Create a SubredditModeration instance.
:param subreddit: The subreddit to moderate.
"""
self.banned = SubredditRelationship(subreddit, "wikibanned")
self.contributor = SubredditRelationship(subreddit, "wikicontributor")
self.subreddit = subreddit
def __iter__(self):
"""Iterate through the pages of the wiki.
This method is to be used to discover all wikipages for a subreddit:
.. code:: python
for wikipage in reddit.subreddit('iama').wiki:
print(wikipage)
"""
response = self.subreddit._reddit.get(
API_PATH["wiki_pages"].format(subreddit=self.subreddit),
params={"unique": self.subreddit._reddit._next_unique},
)
for page_name in response["data"]:
yield WikiPage(self.subreddit._reddit, self.subreddit, page_name)
def create(self, name, content, reason=None, **other_settings):
"""Create a new wiki page.
:param name: The name of the new WikiPage. This name will be
normalized.
:param content: The content of the new WikiPage.
:param reason: (Optional) The reason for the creation.
:param other_settings: Additional keyword arguments to pass.
To create the wiki page ``'praw_test'`` in ``'/r/test'`` try:
.. code:: python
reddit.subreddit('test').wiki.create(
'praw_test', 'wiki body text', reason='PRAW Test Creation')
"""
name = name.replace(" ", "_").lower()
new = WikiPage(self.subreddit._reddit, self.subreddit, name)
new.edit(content=content, reason=reason, **other_settings)
return new
def revisions(self, **generator_kwargs):
"""Return a generator for recent wiki revisions.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
To view the wiki revisions for ``'praw_test'`` in ``'/r/test'`` try:
.. code:: python
for item in reddit.subreddit('test').wiki['praw_test'].revisions():
print(item)
"""
url = API_PATH["wiki_revisions"].format(subreddit=self.subreddit)
return WikiPage._revision_generator(
self.subreddit, url, generator_kwargs
)
| bsd-2-clause |
infoxchange/lettuce | tests/integration/lib/Django-1.2.5/tests/regressiontests/urlpatterns_reverse/namespace_urls.py | 51 | 1463 | from django.conf.urls.defaults import *
class URLObject(object):
def __init__(self, app_name, namespace):
self.app_name = app_name
self.namespace = namespace
def urls(self):
return patterns('',
url(r'^inner/$', 'empty_view', name='urlobject-view'),
url(r'^inner/(?P<arg1>\d+)/(?P<arg2>\d+)/$', 'empty_view', name='urlobject-view'),
), self.app_name, self.namespace
urls = property(urls)
testobj1 = URLObject('testapp', 'test-ns1')
testobj2 = URLObject('testapp', 'test-ns2')
default_testobj = URLObject('testapp', 'testapp')
otherobj1 = URLObject('nodefault', 'other-ns1')
otherobj2 = URLObject('nodefault', 'other-ns2')
urlpatterns = patterns('regressiontests.urlpatterns_reverse.views',
url(r'^normal/$', 'empty_view', name='normal-view'),
url(r'^normal/(?P<arg1>\d+)/(?P<arg2>\d+)/$', 'empty_view', name='normal-view'),
(r'^test1/', include(testobj1.urls)),
(r'^test2/', include(testobj2.urls)),
(r'^default/', include(default_testobj.urls)),
(r'^other1/', include(otherobj1.urls)),
(r'^other2/', include(otherobj2.urls)),
(r'^ns-included1/', include('regressiontests.urlpatterns_reverse.included_namespace_urls', namespace='inc-ns1')),
(r'^ns-included2/', include('regressiontests.urlpatterns_reverse.included_namespace_urls', namespace='inc-ns2')),
(r'^included/', include('regressiontests.urlpatterns_reverse.included_namespace_urls')),
)
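# Clarifying note (added comment, not part of the original fixture): with the
# patterns above installed, a namespaced reverse such as
#
#     reverse('test-ns1:urlobject-view')
#
# resolves against testobj1's included URLs, which is what the
# urlpatterns_reverse regression tests exercise.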
| gpl-3.0 |
matsjoyce/python-krunner | src/krunner.py | 1 | 2880 | import _krunner
import abc
from PyQt5 import QtCore
__all__ = ["AbstractRunner", "QueryMatch", "RunnerContext", "RunnerSyntax"]
for name in __all__[1:]:
cls = getattr(_krunner.Plasma, name)
globals()[name] = cls
cls.__module__ = __name__
del name
del cls
class ARMeta(type(_krunner.Plasma.AbstractRunner), abc.ABCMeta):
pass
class AbstractRunner(_krunner.Plasma.AbstractRunner, metaclass=ARMeta):
def __init__(self, parent, args):
# Using parent() seems to remove the type
self._parent = parent
super().__init__(parent, args)
# Public
@abc.abstractmethod
def match(self, runnercontext):
pass
def hasRunOptions(self):
return self._parent.hasRunOptions()
def speed(self):
return self._parent.speed()
def priority(self):
return self._parent.priority()
def ignoredTypes(self):
return self._parent.ignoredTypes()
def setIgnoredTypes(self, types):
return self._parent.setIgnoredTypes(_krunner.Plasma.RunnerContext.Types(types))
def name(self):
return self._parent.name()
def id(self):
return self._parent.id()
def description(self):
return self._parent.description()
def icon(self):
return self._parent.icon()
def syntaxes(self):
return self._parent.syntaxes()
def defaultSyntax(self):
return self._parent.defaultSyntax()
def isMatchingSuspended(self):
return self._parent.isMatchingSuspended()
# Private
def suspendMatching(self, suspend):
return self._parent.suspendMatching(suspend)
def setHasRunOptions(self, hasRunOptions):
return self._parent.setHasRunOptions(hasRunOptions)
def setSpeed(self, newSpeed):
return self._parent.setSpeed(newSpeed)
def setPriority(self, newPriority):
return self._parent.setPriority(newPriority)
def addAction(self, id, icon_or_action, text=None):
if text is None:
return self._parent.addAction(id, icon_or_action)
else:
return self._parent.addAction(id, icon_or_action, text)
def removeAction(self, id):
return self._parent.removeAction(id)
def action(self, id):
return self._parent.action(id)
def actions(self):
return self._parent.actions()
def clearActions(self):
return self._parent.clearActions()
def addSyntax(self, syntax):
return self._parent.addSyntax(syntax)
def setDefaultSyntax(self, syntax):
return self._parent.setDefaultSyntax(syntax)
def setSyntaxes(self, syns):
return self._parent.setSyntaxes(syns)
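# Hypothetical sketch (not part of the original module): a concrete runner
# only has to implement ``match``; the remaining methods are inherited, e.g.
#
#     class EchoRunner(AbstractRunner):
#         def match(self, context):
#             query = context.query()
#             # ... build and add QueryMatch objects to ``context`` here ...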
def _except_hook(type, value, tb):
# Used by plasma_runner_python to stop qFatal being called by PyQt5
import traceback
print("Exception in runner:")
traceback.print_exception(type, value, tb)
| lgpl-3.0 |
e-mission/e-mission-server | emission/analysis/plotting/leaflet_osm/ipython_helper.py | 2 | 1415 | """
Helper functions that can display leaflet maps inline in an ipython notebook
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import *
import branca.element as bre
def inline_map(m):
"""
Embeds the HTML source of the map directly into the IPython notebook.
This method will not work if the map depends on any files (json data). Also this uses
the HTML5 srcdoc attribute, which may not be supported in all browsers.
"""
fig = bre.Figure()
fig.add_subplot(1,1,1).add_child(m)
return fig
def inline_maps(map_list):
"""
Embeds the HTML source of the map_list directly into the IPython notebook.
This method will not work if the map depends on any files (json data). Also this uses
the HTML5 srcdoc attribute, which may not be supported in all browsers.
    map_list: flat list of maps to display; they are laid out row by row in
        a grid that is two columns wide
"""
ncols = 2
    nrows = (len(map_list) // ncols) + 1
fig = bre.Figure()
for i, m in enumerate(map_list):
fig.add_subplot(nrows,ncols,i+1).add_child(m)
return fig
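# Illustrative usage (assumption, not part of the original module): inside a
# notebook cell the returned Figure renders inline, e.g.
#
#     import folium
#     m = folium.Map(location=[45.5236, -122.6750])
#     inline_map(m)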
| bsd-3-clause |
MakMukhi/grpc | src/python/grpcio_tests/tests/unit/_junkdrawer/__init__.py | 901 | 1528 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause |
blisseth/ThinkStats2 | code/regression.py | 62 | 9652 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
import pandas
import random
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
import re
import chap01soln
import first
import linear
import thinkplot
import thinkstats2
def QuickLeastSquares(xs, ys):
"""Estimates linear least squares fit and returns MSE.
xs: sequence of values
ys: sequence of values
returns: inter, slope, mse
"""
n = float(len(xs))
meanx = xs.mean()
dxs = xs - meanx
varx = np.dot(dxs, dxs) / n
meany = ys.mean()
dys = ys - meany
cov = np.dot(dxs, dys) / n
slope = cov / varx
inter = meany - slope * meanx
res = ys - (inter + slope * xs)
mse = np.dot(res, res) / n
return inter, slope, mse
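# Illustrative check (added comment, not part of the original file): for an
# exactly linear relationship the fit recovers the line and the MSE is ~0,
# e.g.
#
#     xs = np.array([1.0, 2.0, 3.0, 4.0])
#     ys = 2 * xs + 1
#     QuickLeastSquares(xs, ys)   # -> roughly (1.0, 2.0, 0.0)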
def ReadVariables():
"""Reads Stata dictionary files for NSFG data.
returns: DataFrame that maps variables names to descriptions
"""
vars1 = thinkstats2.ReadStataDct('2002FemPreg.dct').variables
vars2 = thinkstats2.ReadStataDct('2002FemResp.dct').variables
all_vars = vars1.append(vars2)
all_vars.index = all_vars.name
return all_vars
def JoinFemResp(df):
"""Reads the female respondent file and joins on caseid.
df: DataFrame
"""
resp = chap01soln.ReadFemResp()
resp.index = resp.caseid
join = df.join(resp, on='caseid', rsuffix='_r')
# convert from colon-separated time strings to datetimes
join.screentime = pandas.to_datetime(join.screentime)
return join
def GoMining(df):
"""Searches for variables that predict birth weight.
df: DataFrame of pregnancy records
returns: list of (rsquared, variable name) pairs
"""
variables = []
for name in df.columns:
try:
if df[name].var() < 1e-7:
continue
formula = 'totalwgt_lb ~ agepreg + ' + name
formula = formula.encode('ascii')
model = smf.ols(formula, data=df)
if model.nobs < len(df)/2:
continue
results = model.fit()
except (ValueError, TypeError):
continue
variables.append((results.rsquared, name))
return variables
def MiningReport(variables, n=30):
"""Prints variables with the highest R^2.
    variables: list of (R^2, variable name) pairs
n: number of pairs to print
"""
all_vars = ReadVariables()
variables.sort(reverse=True)
for mse, name in variables[:n]:
key = re.sub('_r$', '', name)
try:
desc = all_vars.loc[key].desc
if isinstance(desc, pandas.Series):
desc = desc[0]
print(name, mse, desc)
except KeyError:
print(name, mse)
def PredictBirthWeight(live):
"""Predicts birth weight of a baby at 30 weeks.
live: DataFrame of live births
"""
live = live[live.prglngth>30]
join = JoinFemResp(live)
t = GoMining(join)
MiningReport(t)
formula = ('totalwgt_lb ~ agepreg + C(race) + babysex==1 + '
'nbrnaliv>1 + paydu==1 + totincr')
results = smf.ols(formula, data=join).fit()
SummarizeResults(results)
def SummarizeResults(results):
"""Prints the most important parts of linear regression results:
results: RegressionResults object
"""
for name, param in results.params.iteritems():
pvalue = results.pvalues[name]
print('%s %0.3g (%.3g)' % (name, param, pvalue))
try:
print('R^2 %.4g' % results.rsquared)
ys = results.model.endog
print('Std(ys) %.4g' % ys.std())
print('Std(res) %.4g' % results.resid.std())
except AttributeError:
print('R^2 %.4g' % results.prsquared)
def RunSimpleRegression(live):
"""Runs a simple regression and compare results to thinkstats2 functions.
live: DataFrame of live births
"""
# run the regression with thinkstats2 functions
live_dropna = live.dropna(subset=['agepreg', 'totalwgt_lb'])
ages = live_dropna.agepreg
weights = live_dropna.totalwgt_lb
inter, slope = thinkstats2.LeastSquares(ages, weights)
res = thinkstats2.Residuals(ages, weights, inter, slope)
r2 = thinkstats2.CoefDetermination(weights, res)
# run the regression with statsmodels
formula = 'totalwgt_lb ~ agepreg'
model = smf.ols(formula, data=live)
results = model.fit()
SummarizeResults(results)
def AlmostEquals(x, y, tol=1e-6):
return abs(x-y) < tol
assert(AlmostEquals(results.params['Intercept'], inter))
assert(AlmostEquals(results.params['agepreg'], slope))
assert(AlmostEquals(results.rsquared, r2))
def PivotTables(live):
"""Prints a pivot table comparing first babies to others.
live: DataFrame of live births
"""
table = pandas.pivot_table(live, rows='isfirst',
values=['totalwgt_lb', 'agepreg'])
print(table)
def FormatRow(results, columns):
"""Converts regression results to a string.
results: RegressionResults object
returns: string
"""
t = []
for col in columns:
coef = results.params.get(col, np.nan)
pval = results.pvalues.get(col, np.nan)
if np.isnan(coef):
s = '--'
elif pval < 0.001:
s = '%0.3g (*)' % (coef)
else:
s = '%0.3g (%0.2g)' % (coef, pval)
t.append(s)
try:
t.append('%.2g' % results.rsquared)
except AttributeError:
t.append('%.2g' % results.prsquared)
return t
def RunModels(live):
"""Runs regressions that predict birth weight.
live: DataFrame of pregnancy records
"""
columns = ['isfirst[T.True]', 'agepreg', 'agepreg2']
header = ['isfirst', 'agepreg', 'agepreg2']
rows = []
formula = 'totalwgt_lb ~ isfirst'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ isfirst + agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
live['agepreg2'] = live.agepreg**2
formula = 'totalwgt_lb ~ isfirst + agepreg + agepreg2'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
PrintTabular(rows, header)
def PrintTabular(rows, header):
"""Prints results in LaTeX tabular format.
rows: list of rows
header: list of strings
"""
s = r'\hline ' + ' & '.join(header) + r' \\ \hline'
print(s)
for row in rows:
s = ' & '.join(row) + r' \\'
print(s)
print(r'\hline')
def LogisticRegressionExample():
"""Runs a simple example of logistic regression and prints results.
"""
y = np.array([0, 1, 0, 1])
x1 = np.array([0, 0, 0, 1])
x2 = np.array([0, 1, 1, 1])
beta = [-1.5, 2.8, 1.1]
log_o = beta[0] + beta[1] * x1 + beta[2] * x2
print(log_o)
o = np.exp(log_o)
print(o)
p = o / (o+1)
print(p)
like = y * p + (1-y) * (1-p)
print(like)
print(np.prod(like))
df = pandas.DataFrame(dict(y=y, x1=x1, x2=x2))
results = smf.logit('y ~ x1 + x2', data=df).fit()
print(results.summary())
def RunLogisticModels(live):
"""Runs regressions that predict sex.
live: DataFrame of pregnancy records
"""
#live = linear.ResampleRowsWeighted(live)
df = live[live.prglngth>30]
df['boy'] = (df.babysex==1).astype(int)
df['isyoung'] = (df.agepreg<20).astype(int)
df['isold'] = (df.agepreg<35).astype(int)
df['season'] = (((df.datend+1) % 12) / 3).astype(int)
# run the simple model
model = smf.logit('boy ~ agepreg', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# run the complex model
model = smf.logit('boy ~ agepreg + hpagelb + birthord + C(race)', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# make the scatter plot
exog = pandas.DataFrame(model.exog, columns=model.exog_names)
endog = pandas.DataFrame(model.endog, columns=[model.endog_names])
xs = exog['agepreg']
lo = results.fittedvalues
o = np.exp(lo)
p = o / (o+1)
#thinkplot.Scatter(xs, p, alpha=0.1)
#thinkplot.Show()
# compute accuracy
actual = endog['boy']
baseline = actual.mean()
predict = (results.predict() >= 0.5)
true_pos = predict * actual
true_neg = (1 - predict) * (1 - actual)
acc = (sum(true_pos) + sum(true_neg)) / len(actual)
print(acc, baseline)
columns = ['agepreg', 'hpagelb', 'birthord', 'race']
new = pandas.DataFrame([[35, 39, 3, 1]], columns=columns)
y = results.predict(new)
print(y)
def main(name, data_dir='.'):
thinkstats2.RandomSeed(17)
LogisticRegressionExample()
live, firsts, others = first.MakeFrames()
live['isfirst'] = (live.birthord == 1)
RunLogisticModels(live)
RunSimpleRegression(live)
RunModels(live)
PredictBirthWeight(live)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
joferkington/numpy | doc/postprocess.py | 143 | 1481 | #!/usr/bin/env python
"""
%prog MODE FILES...
Post-processes HTML and Latex files output by Sphinx.
MODE is either 'html' or 'tex'.
"""
from __future__ import division, absolute_import, print_function
import re
import optparse
import io
def main():
p = optparse.OptionParser(__doc__)
options, args = p.parse_args()
if len(args) < 1:
p.error('no mode given')
mode = args.pop(0)
if mode not in ('html', 'tex'):
p.error('unknown mode %s' % mode)
for fn in args:
f = io.open(fn, 'r', encoding="utf-8")
try:
if mode == 'html':
lines = process_html(fn, f.readlines())
elif mode == 'tex':
lines = process_tex(f.readlines())
finally:
f.close()
f = io.open(fn, 'w', encoding="utf-8")
f.write("".join(lines))
f.close()
def process_html(fn, lines):
return lines
def process_tex(lines):
"""
Remove unnecessary section titles from the LaTeX file.
"""
new_lines = []
for line in lines:
if (line.startswith(r'\section{numpy.')
or line.startswith(r'\subsection{numpy.')
or line.startswith(r'\subsubsection{numpy.')
or line.startswith(r'\paragraph{numpy.')
or line.startswith(r'\subparagraph{numpy.')
):
pass # skip!
else:
new_lines.append(line)
return new_lines
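def _process_tex_demo():
    """Hedged sketch (not part of the original script): process_tex drops the
    auto-generated numpy.* section titles and keeps everything else."""
    lines = [r'\section{numpy.ndarray}' + '\n',
             r'\section{Array objects}' + '\n']
    assert process_tex(lines) == [r'\section{Array objects}' + '\n']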
if __name__ == "__main__":
main()
| bsd-3-clause |
arne-cl/pattern | pattern/web/pdf/encodingdb.py | 56 | 1548 | #!/usr/bin/env python2
import re
from psparser import PSLiteral
from glyphlist import glyphname2unicode
from latin_enc import ENCODING
## name2unicode
##
STRIP_NAME = re.compile(r'[0-9]+')
def name2unicode(name):
"""Converts Adobe glyph names to Unicode numbers."""
if name in glyphname2unicode:
return glyphname2unicode[name]
m = STRIP_NAME.search(name)
if not m: raise KeyError(name)
return unichr(int(m.group(0)))
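def _name2unicode_demo():
    # Hedged sketch (not part of the original module): 'A' is assumed to be a
    # key in the Adobe glyph list, while 'g123' exercises the digit-stripping
    # fallback and maps to unichr(123).
    assert name2unicode('g123') == unichr(123)
    return name2unicode('A')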
## EncodingDB
##
class EncodingDB(object):
std2unicode = {}
mac2unicode = {}
win2unicode = {}
pdf2unicode = {}
for (name,std,mac,win,pdf) in ENCODING:
c = name2unicode(name)
if std: std2unicode[std] = c
if mac: mac2unicode[mac] = c
if win: win2unicode[win] = c
if pdf: pdf2unicode[pdf] = c
encodings = {
'StandardEncoding': std2unicode,
'MacRomanEncoding': mac2unicode,
'WinAnsiEncoding': win2unicode,
'PDFDocEncoding': pdf2unicode,
}
@classmethod
def get_encoding(klass, name, diff=None):
cid2unicode = klass.encodings.get(name, klass.std2unicode)
if diff:
cid2unicode = cid2unicode.copy()
cid = 0
for x in diff:
if isinstance(x, int):
cid = x
elif isinstance(x, PSLiteral):
try:
cid2unicode[cid] = name2unicode(x.name)
except KeyError:
pass
cid += 1
return cid2unicode
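# Hedged usage sketch (not part of the original module): 'WinAnsiEncoding' is
# one of the encoding names registered above; unknown names fall back to the
# standard table, and with no diff list the shared dict is returned directly,
# so callers should treat it as read-only.
#
# win_table = EncodingDB.get_encoding('WinAnsiEncoding')
# fallback = EncodingDB.get_encoding('NoSuchEncoding')  # std2unicode fallback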
| bsd-3-clause |
sekikn/incubator-airflow | airflow/providers/amazon/aws/operators/s3_bucket.py | 7 | 3993 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains AWS S3 operators."""
from typing import Optional
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.utils.decorators import apply_defaults
class S3CreateBucketOperator(BaseOperator):
"""
    This operator creates an S3 bucket.
    :param bucket_name: This is the bucket name you want to create
:type bucket_name: str
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:type aws_conn_id: Optional[str]
:param region_name: AWS region_name. If not specified fetched from connection.
:type region_name: Optional[str]
"""
@apply_defaults
def __init__(
self,
*,
bucket_name: str,
aws_conn_id: Optional[str] = "aws_default",
region_name: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
        self.bucket_name = bucket_name
        self.aws_conn_id = aws_conn_id
        self.region_name = region_name
def execute(self, context):
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
if not s3_hook.check_for_bucket(self.bucket_name):
s3_hook.create_bucket(bucket_name=self.bucket_name, region_name=self.region_name)
self.log.info("Created bucket with name: %s", self.bucket_name)
else:
self.log.info("Bucket with name: %s already exists", self.bucket_name)
class S3DeleteBucketOperator(BaseOperator):
"""
    This operator deletes an S3 bucket.
    :param bucket_name: This is the bucket name you want to delete
:type bucket_name: str
:param force_delete: Forcibly delete all objects in the bucket before deleting the bucket
:type force_delete: bool
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:type aws_conn_id: Optional[str]
"""
def __init__(
self,
bucket_name: str,
force_delete: bool = False,
aws_conn_id: Optional[str] = "aws_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.force_delete = force_delete
self.aws_conn_id = aws_conn_id
def execute(self, context):
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
if s3_hook.check_for_bucket(self.bucket_name):
s3_hook.delete_bucket(bucket_name=self.bucket_name, force_delete=self.force_delete)
self.log.info("Deleted bucket with name: %s", self.bucket_name)
else:
self.log.info("Bucket with name: %s doesn't exist", self.bucket_name)
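# Hedged usage sketch (not part of this module): the dag_id, dates and bucket
# name below are made-up placeholders; only the operator and argument names
# come from the classes defined above.
#
# from datetime import datetime
# from airflow import DAG
#
# with DAG(dag_id="s3_bucket_demo", start_date=datetime(2021, 1, 1),
#          schedule_interval=None) as dag:
#     create = S3CreateBucketOperator(task_id="create_bucket",
#                                     bucket_name="demo-bucket")
#     delete = S3DeleteBucketOperator(task_id="delete_bucket",
#                                     bucket_name="demo-bucket",
#                                     force_delete=True)
#     create >> delete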
| apache-2.0 |
roselleebarle04/django | tests/flatpages_tests/test_sitemaps.py | 380 | 1326 | from __future__ import unicode_literals
from django.apps import apps
from django.contrib.sites.models import Site
from django.test import TestCase
from django.test.utils import modify_settings, override_settings
@override_settings(
ROOT_URLCONF='flatpages_tests.urls',
SITE_ID=1,
)
@modify_settings(
INSTALLED_APPS={
'append': ['django.contrib.sitemaps', 'django.contrib.flatpages'],
},
)
class FlatpagesSitemapTests(TestCase):
@classmethod
def setUpClass(cls):
super(FlatpagesSitemapTests, cls).setUpClass()
# This cleanup is necessary because contrib.sites cache
# makes tests interfere with each other, see #11505
Site.objects.clear_cache()
@classmethod
def setUpTestData(cls):
Site = apps.get_model('sites.Site')
current_site = Site.objects.get_current()
current_site.flatpage_set.create(url="/foo/", title="foo")
current_site.flatpage_set.create(url="/private-foo/", title="private foo", registration_required=True)
def test_flatpage_sitemap(self):
response = self.client.get('/flatpages/sitemap.xml')
self.assertIn(b'<url><loc>http://example.com/foo/</loc></url>', response.getvalue())
self.assertNotIn(b'<url><loc>http://example.com/private-foo/</loc></url>', response.getvalue())
| bsd-3-clause |
blaggacao/odoo | addons/mail/tests/test_mail_group.py | 140 | 3964 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.tests.common import TestMail
from openerp.exceptions import AccessError
from openerp.osv.orm import except_orm
from openerp.tools import mute_logger
class TestMailGroup(TestMail):
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_00_mail_group_access_rights(self):
""" Testing mail_group access rights and basic mail_thread features """
cr, uid, user_noone_id, user_employee_id = self.cr, self.uid, self.user_noone_id, self.user_employee_id
# Do: Bert reads Jobs -> ok, public
self.mail_group.read(cr, user_noone_id, [self.group_jobs_id])
# Do: Bert read Pigs -> ko, restricted to employees
with self.assertRaises(except_orm):
self.mail_group.read(cr, user_noone_id, [self.group_pigs_id])
# Do: Raoul read Pigs -> ok, belong to employees
self.mail_group.read(cr, user_employee_id, [self.group_pigs_id])
# Do: Bert creates a group -> ko, no access rights
with self.assertRaises(AccessError):
self.mail_group.create(cr, user_noone_id, {'name': 'Test'})
# Do: Raoul creates a restricted group -> ok
new_group_id = self.mail_group.create(cr, user_employee_id, {'name': 'Test'})
# Do: Bert added in followers, read -> ok, in followers
self.mail_group.message_subscribe_users(cr, uid, [new_group_id], [user_noone_id])
self.mail_group.read(cr, user_noone_id, [new_group_id])
# Do: Raoul reads Priv -> ko, private
with self.assertRaises(except_orm):
self.mail_group.read(cr, user_employee_id, [self.group_priv_id])
# Do: Raoul added in follower, read -> ok, in followers
self.mail_group.message_subscribe_users(cr, uid, [self.group_priv_id], [user_employee_id])
self.mail_group.read(cr, user_employee_id, [self.group_priv_id])
        # Do: Raoul writes on Priv -> ok, in followers
self.mail_group.write(cr, user_employee_id, [self.group_priv_id], {'name': 'modified'})
# Do: Bert cannot write on Private -> ko (read but no write)
with self.assertRaises(AccessError):
self.mail_group.write(cr, user_noone_id, [self.group_priv_id], {'name': 're-modified'})
# Test: Bert cannot unlink the group
with self.assertRaises(except_orm):
self.mail_group.unlink(cr, user_noone_id, [self.group_priv_id])
# Do: Raoul unlinks the group, there are no followers and messages left
self.mail_group.unlink(cr, user_employee_id, [self.group_priv_id])
fol_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', self.group_priv_id)])
self.assertFalse(fol_ids, 'unlinked document should not have any followers left')
msg_ids = self.mail_message.search(cr, uid, [('model', '=', 'mail.group'), ('res_id', '=', self.group_priv_id)])
        self.assertFalse(msg_ids, 'unlinked document should not have any messages left')
| agpl-3.0 |
meetsandeepan/meetsandeepan.github.io | node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/token.py | 365 | 5662 | # -*- coding: utf-8 -*-
"""
pygments.token
~~~~~~~~~~~~~~
Basic token types and the standard tokens.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class _TokenType(tuple):
parent = None
def split(self):
buf = []
node = self
while node is not None:
buf.append(node)
node = node.parent
buf.reverse()
return buf
def __init__(self, *args):
# no need to call super.__init__
self.subtypes = set()
def __contains__(self, val):
return self is val or (
type(val) is self.__class__ and
val[:len(self)] == self
)
def __getattr__(self, val):
if not val or not val[0].isupper():
return tuple.__getattribute__(self, val)
new = _TokenType(self + (val,))
setattr(self, val, new)
self.subtypes.add(new)
new.parent = self
return new
def __repr__(self):
return 'Token' + (self and '.' or '') + '.'.join(self)
Token = _TokenType()
# Special token types
Text = Token.Text
Whitespace = Text.Whitespace
Error = Token.Error
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
Other = Token.Other
# Common token types for source code
Keyword = Token.Keyword
Name = Token.Name
Literal = Token.Literal
String = Literal.String
Number = Literal.Number
Punctuation = Token.Punctuation
Operator = Token.Operator
Comment = Token.Comment
# Generic types for non-source code
Generic = Token.Generic
# String and some others are not direct children of Token.
# Alias them:
Token.Token = Token
Token.String = String
Token.Number = Number
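def _token_subtype_demo():
    """Hedged sketch (not part of the original module): attribute access on a
    token creates and caches a new subtype, and containment walks ancestry."""
    assert Name.Function in Name            # a subtype is "in" its parent
    assert Name.Function not in Keyword
    assert repr(Name.Function) == 'Token.Name.Function'
    return Name.Function.split()            # [Token, Token.Name, Token.Name.Function]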
def is_token_subtype(ttype, other):
"""
Return True if ``ttype`` is a subtype of ``other``.
    Exists for backwards compatibility; use ``ttype in other`` instead.
"""
return ttype in other
def string_to_tokentype(s):
"""
Convert a string into a token type::
    >>> string_to_tokentype('String.Double')
    Token.Literal.String.Double
    >>> string_to_tokentype('Token.Literal.Number')
    Token.Literal.Number
    >>> string_to_tokentype('')
    Token
    Tokens that are already tokens are returned unchanged:
    >>> string_to_tokentype(String)
    Token.Literal.String
"""
if isinstance(s, _TokenType):
return s
if not s:
return Token
node = Token
for item in s.split('.'):
node = getattr(node, item)
return node
# Map standard token types to short names, used in CSS class naming.
# If you add a new item, please be sure to run this file to perform
# a consistency check for duplicate values.
STANDARD_TYPES = {
Token: '',
Text: '',
Whitespace: 'w',
Error: 'err',
Other: 'x',
Keyword: 'k',
Keyword.Constant: 'kc',
Keyword.Declaration: 'kd',
Keyword.Namespace: 'kn',
Keyword.Pseudo: 'kp',
Keyword.Reserved: 'kr',
Keyword.Type: 'kt',
Name: 'n',
Name.Attribute: 'na',
Name.Builtin: 'nb',
Name.Builtin.Pseudo: 'bp',
Name.Class: 'nc',
Name.Constant: 'no',
Name.Decorator: 'nd',
Name.Entity: 'ni',
Name.Exception: 'ne',
Name.Function: 'nf',
Name.Property: 'py',
Name.Label: 'nl',
Name.Namespace: 'nn',
Name.Other: 'nx',
Name.Tag: 'nt',
Name.Variable: 'nv',
Name.Variable.Class: 'vc',
Name.Variable.Global: 'vg',
Name.Variable.Instance: 'vi',
Literal: 'l',
Literal.Date: 'ld',
String: 's',
String.Backtick: 'sb',
String.Char: 'sc',
String.Doc: 'sd',
String.Double: 's2',
String.Escape: 'se',
String.Heredoc: 'sh',
String.Interpol: 'si',
String.Other: 'sx',
String.Regex: 'sr',
String.Single: 's1',
String.Symbol: 'ss',
Number: 'm',
Number.Float: 'mf',
Number.Hex: 'mh',
Number.Integer: 'mi',
Number.Integer.Long: 'il',
Number.Oct: 'mo',
Operator: 'o',
Operator.Word: 'ow',
Punctuation: 'p',
Comment: 'c',
Comment.Multiline: 'cm',
Comment.Preproc: 'cp',
Comment.Single: 'c1',
Comment.Special: 'cs',
Generic: 'g',
Generic.Deleted: 'gd',
Generic.Emph: 'ge',
Generic.Error: 'gr',
Generic.Heading: 'gh',
Generic.Inserted: 'gi',
Generic.Output: 'go',
Generic.Prompt: 'gp',
Generic.Strong: 'gs',
Generic.Subheading: 'gu',
Generic.Traceback: 'gt',
}
| mit |
sameetb-cuelogic/edx-platform-test | lms/djangoapps/certificates/migrations/0006_auto__chg_field_generatedcertificate_certificate_id.py | 188 | 7408 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'GeneratedCertificate.certificate_id'
db.alter_column('certificates_generatedcertificate', 'certificate_id', self.gf('django.db.models.fields.CharField')(max_length=32, null=True))
def backwards(self, orm):
# Changing field 'GeneratedCertificate.certificate_id'
db.alter_column('certificates_generatedcertificate', 'certificate_id', self.gf('django.db.models.fields.CharField')(default=None, max_length=32))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'certificates.generatedcertificate': {
'Meta': {'object_name': 'GeneratedCertificate'},
'certificate_id': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True'}),
'download_url': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grade': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True'}),
'graded_certificate_id': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True'}),
'graded_download_url': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
| agpl-3.0 |
pymedusa/SickRage | ext/guessit/rules/properties/video_codec.py | 4 | 4959 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
video_codec and video_profile property
"""
from rebulk import Rebulk, Rule, RemoveMatch
from rebulk.remodule import re
from ..common import dash
from ..common.pattern import is_disabled
from ..common.validators import seps_after, seps_before, seps_surround
def video_codec(config): # pylint:disable=unused-argument
"""
Builder for rebulk object.
:param config: rule configuration
:type config: dict
:return: Created Rebulk object
:rtype: Rebulk
"""
rebulk = Rebulk()
rebulk = rebulk.regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]).string_defaults(ignore_case=True)
rebulk.defaults(name="video_codec",
tags=['source-suffix', 'streaming_service.suffix'],
disabled=lambda context: is_disabled(context, 'video_codec'))
rebulk.regex(r'Rv\d{2}', value='RealVideo')
rebulk.regex('Mpe?g-?2', '[hx]-?262', value='MPEG-2')
rebulk.string("DVDivX", "DivX", value="DivX")
rebulk.string('XviD', value='Xvid')
rebulk.regex('VC-?1', value='VC-1')
rebulk.string('VP7', value='VP7')
rebulk.string('VP8', 'VP80', value='VP8')
rebulk.string('VP9', value='VP9')
rebulk.regex('[hx]-?263', value='H.263')
rebulk.regex('[hx]-?264', '(MPEG-?4)?AVC(?:HD)?', value='H.264')
rebulk.regex('[hx]-?265', 'HEVC', value='H.265')
rebulk.regex('(?P<video_codec>hevc)(?P<color_depth>10)', value={'video_codec': 'H.265', 'color_depth': '10-bit'},
tags=['video-codec-suffix'], children=True)
# http://blog.mediacoderhq.com/h264-profiles-and-levels/
# https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC
rebulk.defaults(clear=True,
name="video_profile",
validator=seps_surround,
disabled=lambda context: is_disabled(context, 'video_profile'))
rebulk.string('BP', value='Baseline', tags='video_profile.rule')
rebulk.string('XP', 'EP', value='Extended', tags='video_profile.rule')
rebulk.string('MP', value='Main', tags='video_profile.rule')
rebulk.string('HP', 'HiP', value='High', tags='video_profile.rule')
# https://en.wikipedia.org/wiki/Scalable_Video_Coding
rebulk.string('SC', 'SVC', value='Scalable Video Coding', tags='video_profile.rule')
# https://en.wikipedia.org/wiki/AVCHD
rebulk.regex('AVC(?:HD)?', value='Advanced Video Codec High Definition', tags='video_profile.rule')
# https://en.wikipedia.org/wiki/H.265/HEVC
rebulk.string('HEVC', value='High Efficiency Video Coding', tags='video_profile.rule')
rebulk.regex('Hi422P', value='High 4:2:2')
rebulk.regex('Hi444PP', value='High 4:4:4 Predictive')
rebulk.regex('Hi10P?', value='High 10') # no profile validation is required
rebulk.string('DXVA', value='DXVA', name='video_api',
disabled=lambda context: is_disabled(context, 'video_api'))
rebulk.defaults(clear=True,
name='color_depth',
validator=seps_surround,
disabled=lambda context: is_disabled(context, 'color_depth'))
rebulk.regex('12.?bits?', value='12-bit')
rebulk.regex('10.?bits?', 'YUV420P10', 'Hi10P?', value='10-bit')
rebulk.regex('8.?bits?', value='8-bit')
rebulk.rules(ValidateVideoCodec, VideoProfileRule)
return rebulk
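# Hedged usage sketch (not part of this module): the release name is made up;
# guessit's top-level API is assumed to wire this builder into its pipeline.
#
# from guessit import guessit
# guessit('Movie.2018.1080p.BluRay.x264-GROUP')
# # -> includes {'video_codec': 'H.264'} among the other detected properties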
class ValidateVideoCodec(Rule):
"""
Validate video_codec with source property or separated
"""
priority = 64
consequence = RemoveMatch
def enabled(self, context):
return not is_disabled(context, 'video_codec')
def when(self, matches, context):
ret = []
for codec in matches.named('video_codec'):
if not seps_before(codec) and \
not matches.at_index(codec.start - 1, lambda match: 'video-codec-prefix' in match.tags):
ret.append(codec)
continue
if not seps_after(codec) and \
not matches.at_index(codec.end + 1, lambda match: 'video-codec-suffix' in match.tags):
ret.append(codec)
continue
return ret
class VideoProfileRule(Rule):
"""
Rule to validate video_profile
"""
consequence = RemoveMatch
def enabled(self, context):
return not is_disabled(context, 'video_profile')
def when(self, matches, context):
profile_list = matches.named('video_profile', lambda match: 'video_profile.rule' in match.tags)
ret = []
for profile in profile_list:
codec = matches.at_span(profile.span, lambda match: match.name == 'video_codec', 0)
if not codec:
codec = matches.previous(profile, lambda match: match.name == 'video_codec')
if not codec:
codec = matches.next(profile, lambda match: match.name == 'video_codec')
if not codec:
ret.append(profile)
return ret
| gpl-3.0 |
ZhangXinNan/tensorflow | tensorflow/python/saved_model/loader_impl.py | 5 | 14273 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loader implementation for SavedModel with hermetic, language-neutral exports.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import utils_impl as saved_model_utils
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
def _parse_saved_model(export_dir):
"""Reads the savedmodel.pb or savedmodel.pbtxt file containing `SavedModel`.
Args:
export_dir: Directory containing the SavedModel file.
Returns:
A `SavedModel` protocol buffer.
Raises:
IOError: If the file does not exist, or cannot be successfully parsed.
"""
# Build the path to the SavedModel in pbtxt format.
path_to_pbtxt = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
# Build the path to the SavedModel in pb format.
path_to_pb = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
# Parse the SavedModel protocol buffer.
saved_model = saved_model_pb2.SavedModel()
if file_io.file_exists(path_to_pb):
try:
file_content = file_io.FileIO(path_to_pb, "rb").read()
saved_model.ParseFromString(file_content)
return saved_model
except message.DecodeError as e:
raise IOError("Cannot parse file %s: %s." % (path_to_pb, str(e)))
elif file_io.file_exists(path_to_pbtxt):
try:
file_content = file_io.FileIO(path_to_pbtxt, "rb").read()
text_format.Merge(file_content.decode("utf-8"), saved_model)
return saved_model
except text_format.ParseError as e:
raise IOError("Cannot parse file %s: %s." % (path_to_pbtxt, str(e)))
else:
raise IOError("SavedModel file does not exist at: %s/{%s|%s}" %
(export_dir,
constants.SAVED_MODEL_FILENAME_PBTXT,
constants.SAVED_MODEL_FILENAME_PB))
def _get_asset_tensors(export_dir, meta_graph_def_to_load, import_scope=None):
"""Gets the asset tensors, if defined in the meta graph def to load.
Args:
export_dir: Directory where the SavedModel is located.
meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded.
import_scope: Optional `string` -- if specified, prepend this followed by
'/' to all returned asset tensor names.
Returns:
A dictionary of asset tensors, keyed by the name of the asset tensor. The
value in the map corresponds to the absolute path of the asset file.
"""
# Collection-def that may contain the assets key.
collection_def = meta_graph_def_to_load.collection_def
asset_tensor_dict = {}
if constants.ASSETS_KEY in collection_def:
# Location of the assets for SavedModel.
assets_directory = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY))
assets_any_proto = collection_def[constants.ASSETS_KEY].any_list.value
# Process each asset and add it to the asset tensor dictionary.
for asset_any_proto in assets_any_proto:
asset_proto = meta_graph_pb2.AssetFileDef()
asset_any_proto.Unpack(asset_proto)
tensor_name = asset_proto.tensor_info.name
if import_scope:
tensor_name = "%s/%s" % (import_scope, tensor_name)
asset_tensor_dict[tensor_name] = os.path.join(
compat.as_bytes(assets_directory),
compat.as_bytes(asset_proto.filename))
return asset_tensor_dict
def _get_main_op_tensor(
meta_graph_def_to_load, init_op_key=constants.MAIN_OP_KEY):
"""Gets the main op tensor, if one exists.
Args:
meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded.
init_op_key: name of collection to check; should be one of MAIN_OP_KEY
or the deprecated LEGACY_INIT_OP_KEY
Returns:
The main op tensor, if it exists and `None` otherwise.
Raises:
RuntimeError: If the collection def corresponding to the main op key has
other than exactly one tensor.
"""
collection_def = meta_graph_def_to_load.collection_def
main_op_tensor = None
if init_op_key in collection_def:
main_ops = collection_def[init_op_key].node_list.value
if len(main_ops) != 1:
raise RuntimeError("Expected exactly one SavedModel main op. "
"Found: {}".format(main_ops))
main_op_tensor = ops.get_collection(init_op_key)[0]
return main_op_tensor
@tf_export("saved_model.loader.maybe_saved_model_directory")
def maybe_saved_model_directory(export_dir):
"""Checks whether the provided export directory could contain a SavedModel.
Note that the method does not load any data by itself. If the method returns
`false`, the export directory definitely does not contain a SavedModel. If the
method returns `true`, the export directory may contain a SavedModel but
provides no guarantee that it can be loaded.
Args:
export_dir: Absolute string path to possible export location. For example,
'/my/foo/model'.
Returns:
True if the export directory contains SavedModel files, False otherwise.
"""
txt_path = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PBTXT)
pb_path = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PB)
return file_io.file_exists(txt_path) or file_io.file_exists(pb_path)
@tf_export("saved_model.loader.load")
def load(sess, tags, export_dir, import_scope=None, **saver_kwargs):
"""Loads the model from a SavedModel as specified by tags.
Args:
sess: The TensorFlow session to restore the variables.
tags: Set of string tags to identify the required MetaGraphDef. These should
correspond to the tags used when saving the variables using the
SavedModel `save()` API.
export_dir: Directory in which the SavedModel protocol buffer and variables
to be loaded are located.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
**saver_kwargs: Optional keyword arguments passed through to Saver.
Returns:
The `MetaGraphDef` protocol buffer loaded in the provided session. This
can be used to further extract signature-defs, collection-defs, etc.
Raises:
RuntimeError: MetaGraphDef associated with the tags cannot be found.
"""
loader = SavedModelLoader(export_dir)
return loader.load(sess, tags, import_scope, **saver_kwargs)
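# Hedged usage sketch (not part of this module): the export path is a
# placeholder; tag_constants.SERVING is the conventional serving tag.
#
# from tensorflow.python.client import session
# from tensorflow.python.saved_model import tag_constants
#
# with session.Session(graph=ops.Graph()) as sess:
#   meta_graph_def = load(sess, [tag_constants.SERVING], "/tmp/exported_model")
#   print(meta_graph_def.signature_def.keys())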
class SavedModelLoader(object):
"""Load graphs and restore variable values from a `SavedModel`."""
def __init__(self, export_dir):
"""Creates a `SavedModelLoader`.
Args:
export_dir: Directory in which the SavedModel protocol buffer and
variables to be loaded are located.
"""
self._export_dir = export_dir
self._variables_path = saved_model_utils.get_variables_path(export_dir)
self._saved_model = _parse_saved_model(export_dir)
@property
def export_dir(self):
"""Directory containing the SavedModel."""
return self._export_dir
@property
def variables_path(self):
"""Path to variable checkpoint files."""
return self._variables_path
@property
def saved_model(self):
"""SavedModel object parsed from the export directory."""
return self._saved_model
def get_meta_graph_def_from_tags(self, tags):
"""Return MetaGraphDef with the exact specified tags.
Args:
tags: A list or set of string tags that identify the MetaGraphDef.
Returns:
MetaGraphDef with the same tags.
Raises:
RuntimeError: if no metagraphs were found with the associated tags.
"""
found_match = False
for meta_graph_def in self._saved_model.meta_graphs:
if set(meta_graph_def.meta_info_def.tags) == set(tags):
meta_graph_def_to_load = meta_graph_def
found_match = True
break
if not found_match:
raise RuntimeError(
"MetaGraphDef associated with tags " + str(tags).strip("[]") +
" could not be found in SavedModel. To inspect available tag-sets in"
" the SavedModel, please use the SavedModel CLI: `saved_model_cli`"
)
return meta_graph_def_to_load
def load_graph(self, graph, tags, import_scope=None, **saver_kwargs):
"""Load ops and nodes from SavedModel MetaGraph into graph.
Args:
graph: tf.Graph object.
tags: a set of string tags identifying a MetaGraphDef.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
**saver_kwargs: keyword arguments to pass to tf.train.import_meta_graph.
Returns:
A tuple of
* Saver defined by the MetaGraph, which can be used to restore the
variable values.
* List of `Operation`/`Tensor` objects returned from
`tf.import_graph_def` (may be `None`).
"""
meta_graph_def = self.get_meta_graph_def_from_tags(tags)
with graph.as_default():
return tf_saver._import_meta_graph_with_return_elements( # pylint: disable=protected-access
meta_graph_def, import_scope=import_scope, **saver_kwargs)
def restore_variables(self, sess, saver, import_scope=None):
"""Restore SavedModel variable values into the session.
Args:
sess: tf.Session to restore variable values.
saver: a tf.train.Saver object. Can be None if there are no variables in
graph. This may be the saver returned by the load_graph() function, or a
default `tf.train.Saver()`.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
Raises:
ValueError: if no saver was passed to the saver argument, and there are
variables in the graph.
"""
with sess.graph.as_default():
if (saver is None and
not variables._all_saveable_objects(scope=import_scope)): # pylint: disable=protected-access
tf_logging.info("The specified SavedModel has no variables; no "
"checkpoints were restored.")
elif isinstance(saver, tf_saver.Saver):
saver.restore(sess, self._variables_path)
else:
raise ValueError(
"No tf.train.Saver object was passed to the function "
"SavedModelLoader.restore_variables. Since there are variables in "
"the graph, a saver is required.")
def run_init_ops(self, sess, tags, import_scope=None):
"""Run initialization ops defined in the `MetaGraphDef`.
Args:
sess: tf.Session to restore variable values.
tags: a set of string tags identifying a MetaGraphDef.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
"""
meta_graph_def = self.get_meta_graph_def_from_tags(tags)
with sess.graph.as_default():
# Get asset tensors, if any.
asset_tensors_dictionary = _get_asset_tensors(
self._export_dir, meta_graph_def, import_scope=import_scope)
main_op_tensor = (
_get_main_op_tensor(meta_graph_def, constants.MAIN_OP_KEY) or
_get_main_op_tensor(meta_graph_def, constants.LEGACY_INIT_OP_KEY))
if main_op_tensor is not None:
sess.run(fetches=[main_op_tensor], feed_dict=asset_tensors_dictionary)
def load(self, sess, tags, import_scope=None, **saver_kwargs):
"""Load the MetaGraphDef graph and restore variable values into the session.
Args:
sess: tf.Session to restore variable values.
tags: a set of string tags identifying a MetaGraphDef.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
**saver_kwargs: keyword arguments to pass to tf.train.import_meta_graph.
Returns:
`MetagraphDef` proto of the graph that was loaded.
"""
with sess.graph.as_default():
saver, _ = self.load_graph(sess.graph, tags, import_scope,
**saver_kwargs)
self.restore_variables(sess, saver, import_scope)
self.run_init_ops(sess, tags, import_scope)
return self.get_meta_graph_def_from_tags(tags)
| apache-2.0 |